#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const strustruct inetpeer_addr *addr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
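
/* Editor's note: each metrics block caches everything TCP remembers
 * about one destination: the dst-style metrics in tcpm_vals[] (RTT,
 * RTTVAR, ssthresh, cwnd, reordering), the last timestamp seen from
 * the peer (tcpm_ts/tcpm_ts_stamp), and the Fast Open state.  Blocks
 * hang off a per-namespace hash table of tcpm_hash_bucket chains,
 * linked through tcpm_next; lookups walk a chain under
 * rcu_read_lock() while insertions and removals take tcp_metrics_lock.
 */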

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

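/* Editor's note: despite the name, tcpm_suck_dst() just (re)seeds a
 * metrics block from the route's metrics: it records which values the
 * dst has locked, copies the raw RTAX_* values into tcpm_vals[], and
 * resets the timestamp state.  Fast Open state survives a reseed
 * unless fastopen_clear is set (it is set only when a block is created
 * or recycled in tcpm_new()).
 */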
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

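/* Editor's note: a lookup that walks more than TCP_METRICS_RECLAIM_DEPTH
 * chain entries without a match returns the TCP_METRICS_RECLAIM_PTR
 * sentinel instead of NULL (see tcp_get_encode()).  tcpm_new() treats
 * the sentinel as "the chain is long enough, recycle the oldest entry
 * in this bucket instead of allocating a new block", which bounds both
 * memory use and chain length.
 */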
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(addr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

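/* Editor's note: the three lookup helpers below differ only in where
 * the peer address comes from -- a request_sock, a timewait sock, or a
 * full socket.  Each maps the address to a bucket via hash_32() over
 * the raw IPv4 address or ipv6_addr_hash(), using the per-namespace
 * tcp_metrics_hash_log (log2 of the bucket count).
 */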
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = tw->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		break;
#endif
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = sk->sk_v6_daddr;
		hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one. Otherwise, use EWMA. Remember, rtt
	 * overestimation is always better than underestimation.
	 */
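	/* Editor's sketch (not part of the original comment): with
	 * m = rtt - srtt, the update below is rtt -= m >> 3, i.e.
	 * rtt' = (7 * rtt + srtt) / 8, an EWMA with gain 1/8; both
	 * values are kept scaled by 8 (see the "cached RTT scaled by 8"
	 * note in tcp_init_metrics()).  E.g. cached rtt = 160, srtt = 80:
	 * m = 80, so the stored value decays to 160 - 10 = 150.
	 */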
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is not
		 * meaningful and ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs, so the
	 * calculation is correct there too. The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT. BUT! If
	 * the peer tries clever tricks, sort of "quick acks", for long
	 * enough to decrease RTT to a low value, and then abruptly stops
	 * doing so and starts to delay ACKs, expect trouble.
	 */
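	/* Editor's sketch (not part of the original comment): the cached
	 * RTT is kept scaled by 8, so crtt >>= 3 recovers jiffies.  The
	 * seed below mirrors tcp_rtt_estimator()'s RTO = srtt + 4*mdev
	 * by assuming mdev = crtt/2, i.e. RTO = crtt + max(2*crtt,
	 * tcp_rto_min(sk)).  E.g. a cached RTT of 100 ms seeds an RTO of
	 * roughly 300 ms (HZ permitting), rather than the 1 s initRTO.
	 */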
	if (crtt > tp->srtt) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt >>= 3;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

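/* Editor's note: with paws_check, a peer is rejected (false) only when
 * its cached timestamp is recent (seen within TCP_PAWS_MSL seconds) and
 * is ahead of the timestamp echoed in the request by more than
 * TCP_PAWS_WINDOW; otherwise the peer is "proven".  Without paws_check,
 * the peer is proven only if we hold both an RTT sample and a timestamp
 * for it.
 */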
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

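/* Editor's note: the Fast Open fields of a metrics block (mss, cookie,
 * SYN-loss count and stamp) must be observed as one consistent
 * snapshot, which RCU alone does not guarantee.  Readers therefore
 * loop on read_seqbegin()/read_seqretry() against fastopen_seqlock,
 * while writers take write_seqlock_bh() (see tcp_fastopen_cache_set()
 * and tcp_metrics_fill_info() below).
 */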
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};
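
/* Editor's note: this generic netlink family exposes the cache to
 * userspace (GET dumps entries, DEL removes one entry or flushes the
 * whole cache).  Assuming an iproute2 recent enough to include the
 * tcp_metrics object, the interface can be exercised with e.g.:
 *
 *	ip tcp_metrics show
 *	ip tcp_metrics delete 10.0.0.1
 *	ip tcp_metrics flush
 */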

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_addr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				 tm->tcpm_addr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_addr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

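/* Editor's note: parse_nl_addr() returns 0 when an IPv4 or IPv6 address
 * attribute was parsed (filling *addr and *hash with the raw,
 * pre-hash_32 value), a negative errno on a malformed or missing
 * mandatory attribute, and 1 when no address was given but "optional"
 * is set -- which the DEL handler below takes to mean "flush the whole
 * cache".
 */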
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	struct nlattr *a;

	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 0);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm;
	     pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			*pp = tm->tcpm_next;
			break;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!tm)
		return -ESRCH;
	kfree_rcu(tm, rcu_head);
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

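/* Editor's note: the per-namespace table defaults to 16K buckets on
 * machines with at least 128 * 1024 pages of RAM (512 MB with 4 KB
 * pages) and 8K otherwise; booting with e.g. "tcpmhash_entries=1024"
 * overrides this, rounded up to a power of two by order_base_2().
 * The table is allocated with kzalloc() first and falls back to
 * vzalloc() when contiguous pages are not available.
 */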
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}