/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};
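
/* These counters back the per-link statistics exposed to user space
 * (e.g. via the netlink link statistics dump); they are cleared again
 * by tipc_link_reset_stats() whenever a link is reset.
 */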

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @prev_from: sequence number of most recent retransmission request
 * @stale_cnt: counter for number of identical retransmit attempts
 * @stale_limit: time when repeated identical retransmits must force link reset
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 prev_from;
	u16 window;
	u16 stale_cnt;
	unsigned long stale_limit;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};
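
/* Note that struct tipc_link is private to this file; the rest of the
 * stack reads its fields only through the small accessor functions
 * further down (tipc_link_id(), tipc_link_mtu(), tipc_link_state(), ...).
 */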

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  msecs_to_jiffies(10)   /* [ms] */
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};
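
/* Each state above occupies its own nibble of the 32-bit state word, so
 * groups of states can be tested with a single bitmask, as done in
 * link_is_up() and tipc_link_is_reset() below.
 */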

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
static void tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
				      struct tipc_gap_ack_blks *ga,
				      struct sk_buff_head *xmitq);

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}
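
/* Example: with rcv_nxt == 10, snd_nxt == 16 (last advertised by peer)
 * and packet #14 first in the deferred queue, link_bc_rcv_gap() reports
 * a gap of 4 (packets 10-13 still missing); with an empty deferdq it
 * would report 6.
 */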

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,C...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @self: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_disable_rcast(net);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}
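
/* A normal link start-up walks the chain LINK_RESET ->
 * (LINK_PEER_RESET_EVT) -> LINK_ESTABLISHING -> (LINK_ESTABLISH_EVT) ->
 * LINK_ESTABLISHED. Note that only the LINK_ESTABLISHED and
 * LINK_SYNCHING states ever add TIPC_LINK_DOWN_EVT to the return code,
 * which is how the caller learns that the link must be taken down.
 */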

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Returns true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}
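
/* The SOCK_WAKEUP pseudo message parked on l->wakeupq above is later
 * moved to the link's input queue by link_prepare_wakeup(), once the
 * backlog level for its importance has dropped below the limit; the
 * -ELINKCONG return value tells the sending socket to block until that
 * wakeup arrives.
 */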

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff *skb, *tmp;
	int imp, i = 0;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (l->backlog[imp].len < l->backlog[imp].limit) {
			skb_unlink(skb, &l->wakeupq);
			skb_queue_tail(l->inputq, skb);
		} else if (i++ > 10) {
			break;
		}
	}
}

void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stale_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	int imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;
	int pkt_cnt = skb_queue_len(list);
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			/* next retransmit attempt */
			if (link_is_bc_sndlink(l))
				TIPC_SKB_CB(skb)->nxt_retr =
					jiffies + TIPC_BC_RETR_LIM;
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}
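
/* In short: packets are cloned to xmitq and appended to transmq while
 * the send window is open; after that, messages are bundled into, or
 * appended to, backlogq. A single message per source may oversubscribe
 * a congested backlog level, with link_schedule_user() queueing a
 * wakeup for the sender.
 */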

static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		/* next retransmit attempt */
		if (link_is_bc_sndlink(l))
			TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;

		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}
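
/* tipc_link_advance_backlog() runs whenever acks open up window space:
 * packets migrate from backlogq to transmq and are cloned onto xmitq
 * for actual transmission.
 */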

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

/* tipc_link_retrans() - retransmit one or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * @xmitq: queue for accumulating the retransmitted packets
 */
static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
			     u16 from, u16 to, struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	struct tipc_msg *hdr;

	if (!skb)
		return 0;
	if (less(to, from))
		return 0;

	trace_tipc_link_retrans(r, from, to, &l->transmq);
	/* Detect repeated retransmit failures on same packet */
	if (r->prev_from != from) {
		r->prev_from = from;
		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
		r->stale_cnt = 0;
	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
		link_retransmit_failure(l, skb);
		trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
		trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
		if (link_is_bc_sndlink(l))
			return TIPC_LINK_DOWN_EVT;
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	skb_queue_walk(&l->transmq, skb) {
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), from))
			continue;
		if (more(msg_seqno(hdr), to))
			break;
		if (link_is_bc_sndlink(l)) {
			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
				continue;
			TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
		}
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
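
/* Note the failure criterion above: only when the same 'from' value has
 * been requested more than 99 times in a row, and the link tolerance
 * time (r->stale_limit) has expired, is the link declared failed.
 */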
1118
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001119/* tipc_data_input - deliver data and name distr msgs to upper layer
Erik Hugne7ae934b2014-07-01 10:22:40 +02001120 *
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001121 * Consumes buffer if message is of right type
Erik Hugne7ae934b2014-07-01 10:22:40 +02001122 * Node lock must be held
1123 */
Jon Paul Maloy52666982015-10-22 08:51:41 -04001124static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001125 struct sk_buff_head *inputq)
Erik Hugne7ae934b2014-07-01 10:22:40 +02001126{
Jon Maloy399574d2017-10-13 11:04:32 +02001127 struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001128 struct tipc_msg *hdr = buf_msg(skb);
1129
1130 switch (msg_user(hdr)) {
Erik Hugne7ae934b2014-07-01 10:22:40 +02001131 case TIPC_LOW_IMPORTANCE:
1132 case TIPC_MEDIUM_IMPORTANCE:
1133 case TIPC_HIGH_IMPORTANCE:
1134 case TIPC_CRITICAL_IMPORTANCE:
Jon Maloy2f487712017-10-13 11:04:31 +02001135 if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
Jon Maloy399574d2017-10-13 11:04:32 +02001136 skb_queue_tail(mc_inputq, skb);
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001137 return true;
1138 }
Gustavo A. R. Silvaf79e3362019-01-23 01:09:31 -06001139 /* fall through */
Jon Maloy2f487712017-10-13 11:04:31 +02001140 case CONN_MANAGER:
Jon Maloy36c0a9d2017-10-16 16:04:51 +02001141 skb_queue_tail(inputq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001142 return true;
Jon Maloy399574d2017-10-13 11:04:32 +02001143 case GROUP_PROTOCOL:
1144 skb_queue_tail(mc_inputq, skb);
1145 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001146 case NAME_DISTRIBUTOR:
Jon Paul Maloy52666982015-10-22 08:51:41 -04001147 l->bc_rcvlink->state = LINK_ESTABLISHED;
1148 skb_queue_tail(l->namedq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001149 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001150 case MSG_BUNDLER:
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001151 case TUNNEL_PROTOCOL:
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001152 case MSG_FRAGMENTER:
1153 case BCAST_PROTOCOL:
1154 return false;
1155 default:
1156 pr_warn("Dropping received illegal msg type\n");
1157 kfree_skb(skb);
Hoang Le7384b532019-02-11 09:18:28 +07001158 return true;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001159 };
1160}
1161
1162/* tipc_link_input - process packet that has passed link protocol check
1163 *
1164 * Consumes buffer
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001165 */
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001166static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
Tuong Lien58ee86b2019-04-04 11:09:53 +07001167 struct sk_buff_head *inputq,
1168 struct sk_buff **reasm_skb)
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001169{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001170 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001171 struct sk_buff *iskb;
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001172 struct sk_buff_head tmpq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001173 int usr = msg_user(hdr);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001174 int pos = 0;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001175
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001176 if (usr == MSG_BUNDLER) {
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001177 skb_queue_head_init(&tmpq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001178 l->stats.recv_bundles++;
1179 l->stats.recv_bundled += msg_msgcnt(hdr);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001180 while (tipc_msg_extract(skb, &iskb, &pos))
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001181 tipc_data_input(l, iskb, &tmpq);
1182 tipc_skb_queue_splice_tail(&tmpq, inputq);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001183 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001184 } else if (usr == MSG_FRAGMENTER) {
1185 l->stats.recv_fragments++;
1186 if (tipc_buf_append(reasm_skb, &skb)) {
1187 l->stats.recv_fragmented++;
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001188 tipc_data_input(l, skb, inputq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001189 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1190 pr_warn_ratelimited("Unable to build fragment list\n");
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001191 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001192 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001193 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001194 } else if (usr == BCAST_PROTOCOL) {
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001195 tipc_bcast_lock(l->net);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001196 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001197 tipc_bcast_unlock(l->net);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001198 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001199
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001200 kfree_skb(skb);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001201 return 0;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001202}
1203
Tuong Lien58ee86b2019-04-04 11:09:53 +07001204/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1205 * inner message along with the ones in the old link's
1206 * deferdq
1207 * @l: tunnel link
1208 * @skb: TUNNEL_PROTOCOL message
1209 * @inputq: queue to put messages ready for delivery
1210 */
1211static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1212 struct sk_buff_head *inputq)
1213{
1214 struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1215 struct sk_buff_head *fdefq = &l->failover_deferdq;
1216 struct tipc_msg *hdr = buf_msg(skb);
1217 struct sk_buff *iskb;
1218 int ipos = 0;
1219 int rc = 0;
1220 u16 seqno;
1221
1222 /* SYNCH_MSG */
1223 if (msg_type(hdr) == SYNCH_MSG)
1224 goto drop;
1225
1226 /* FAILOVER_MSG */
1227 if (!tipc_msg_extract(skb, &iskb, &ipos)) {
1228 pr_warn_ratelimited("Cannot extract FAILOVER_MSG, defq: %d\n",
1229 skb_queue_len(fdefq));
1230 return rc;
1231 }
1232
1233 do {
1234 seqno = buf_seqno(iskb);
1235
1236 if (unlikely(less(seqno, l->drop_point))) {
1237 kfree_skb(iskb);
1238 continue;
1239 }
1240
1241 if (unlikely(seqno != l->drop_point)) {
1242 __tipc_skb_queue_sorted(fdefq, seqno, iskb);
1243 continue;
1244 }
1245
1246 l->drop_point++;
1247
1248 if (!tipc_data_input(l, iskb, inputq))
1249 rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1250 if (unlikely(rc))
1251 break;
1252 } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1253
1254drop:
1255 kfree_skb(skb);
1256 return rc;
1257}
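/* Worked example (illustrative only): assume l->drop_point is 5 and a
 * FAILOVER_MSG arrives tunneling the inner packet #7, followed by one
 * tunneling #5. The first inner packet is queued on failover_deferdq
 * (7 != 5); the second is delivered and drop_point becomes 6, while #7
 * stays deferred until the inner packet #6 eventually arrives.
 */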
1258
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001259static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1260{
1261 bool released = false;
1262 struct sk_buff *skb, *tmp;
1263
1264 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1265 if (more(buf_seqno(skb), acked))
1266 break;
1267 __skb_unlink(skb, &l->transmq);
1268 kfree_skb(skb);
1269 released = true;
1270 }
1271 return released;
1272}
1273
Tuong Lien91959482019-04-04 11:09:51 +07001274/* tipc_build_gap_ack_blks - build Gap ACK blocks
1275 * @l: tipc link on which data may have arrived with sequence gaps
1276 * @data: buffer in which to build the Gap ACK blocks
1277 *
1278 * Returns the actual length (in bytes) of the Gap ACK blocks built
1279 */
1280static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1281{
1282 struct sk_buff *skb = skb_peek(&l->deferdq);
1283 struct tipc_gap_ack_blks *ga = data;
1284 u16 len, expect, seqno = 0;
1285 u8 n = 0;
1286
1287 if (!skb)
1288 goto exit;
1289
1290 expect = buf_seqno(skb);
1291 skb_queue_walk(&l->deferdq, skb) {
1292 seqno = buf_seqno(skb);
1293 if (unlikely(more(seqno, expect))) {
1294 ga->gacks[n].ack = htons(expect - 1);
1295 ga->gacks[n].gap = htons(seqno - expect);
1296 if (++n >= MAX_GAP_ACK_BLKS) {
1297				pr_info_ratelimited("Gap ACK blocks exhausted - not all gaps reported!\n");
1298 goto exit;
1299 }
1300 } else if (unlikely(less(seqno, expect))) {
1301 pr_warn("Unexpected skb in deferdq!\n");
1302 continue;
1303 }
1304 expect = seqno + 1;
1305 }
1306
1307 /* last block */
1308 ga->gacks[n].ack = htons(seqno);
1309 ga->gacks[n].gap = 0;
1310 n++;
1311
1312exit:
1313 len = tipc_gap_ack_blks_sz(n);
1314 ga->len = htons(len);
1315 ga->gack_cnt = n;
1316 return len;
1317}
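/* Encoding example (illustrative only): with rcv_nxt = 100 and deferdq
 * holding seqnos {102, 103, 106, 107}, the walk above produces two
 * blocks: {ack = 103, gap = 2} for the missing 104-105, and the
 * terminating {ack = 107, gap = 0}. The gap 100-101 before the deferdq
 * head is not encoded here; it is conveyed by the regular ack/seq_gap
 * fields of the STATE message (see tipc_link_build_proto_msg()).
 */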
1318
1319/* tipc_link_advance_transmq - advance the TIPC link transmq by releasing
1320 *                             acked packets, also retransmitting any
1321 *                             packets reported missing
1322 * @l: tipc link whose transmq is to be advanced
1323 * @acked: seqno of the last packet acked by peer, with no gaps before it
1324 * @gap: number of packets missing immediately after @acked
1325 * @ga: buffer pointer to Gap ACK blocks from peer
1326 * @xmitq: queue for accumulating any retransmitted packets
1327 */
1328static void tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1329 struct tipc_gap_ack_blks *ga,
1330 struct sk_buff_head *xmitq)
1331{
1332 struct sk_buff *skb, *_skb, *tmp;
1333 struct tipc_msg *hdr;
1334 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1335 u16 ack = l->rcv_nxt - 1;
1336 u16 seqno;
1337 u16 n = 0;
1338
1339 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1340 seqno = buf_seqno(skb);
1341
1342next_gap_ack:
1343 if (less_eq(seqno, acked)) {
1344 /* release skb */
1345 __skb_unlink(skb, &l->transmq);
1346 kfree_skb(skb);
1347 } else if (less_eq(seqno, acked + gap)) {
1348 /* retransmit skb */
Tuong Lien382f5982019-04-04 11:09:52 +07001349 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1350 continue;
1351 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1352
Tuong Lien91959482019-04-04 11:09:51 +07001353 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1354 if (!_skb)
1355 continue;
1356 hdr = buf_msg(_skb);
1357 msg_set_ack(hdr, ack);
1358 msg_set_bcast_ack(hdr, bc_ack);
1359 _skb->priority = TC_PRIO_CONTROL;
1360 __skb_queue_tail(xmitq, _skb);
1361 l->stats.retransmitted++;
1362 } else {
1363 /* retry with Gap ACK blocks if any */
1364 if (!ga || n >= ga->gack_cnt)
1365 break;
1366 acked = ntohs(ga->gacks[n].ack);
1367 gap = ntohs(ga->gacks[n].gap);
1368 n++;
1369 goto next_gap_ack;
1370 }
1371 }
1372}
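/* Continuing the encoding example above (illustrative only): the peer's
 * STATE message carries ack = 99, gap = 2 and blocks {103,2}, {107,0}.
 * The walk releases nothing for seqnos 100-101 (> acked) but
 * retransmits them (<= acked + gap), then switches to block {103,2}:
 * 102-103 are released and 104-105 retransmitted; with block {107,0}
 * 106-107 are released, and at 108 the blocks are exhausted so the
 * walk stops.
 */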
1373
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001374/* tipc_link_build_state_msg: prepare link state message for transmission
Jon Paul Maloy52666982015-10-22 08:51:41 -04001375 *
1376 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1377 * risk of ack storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001378 */
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001379int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001380{
Jon Paul Maloy52666982015-10-22 08:51:41 -04001381 if (!l)
1382 return 0;
1383
1384 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1385 if (link_is_bc_rcvlink(l)) {
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001386 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001387 return 0;
1388 l->rcv_unacked = 0;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001389
1390 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1391 l->snd_nxt = l->rcv_nxt;
1392 return TIPC_LINK_SND_STATE;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001393 }
1394
1395 /* Unicast ACK */
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001396 l->rcv_unacked = 0;
1397 l->stats.sent_acks++;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001398 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001399 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001400}
1401
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001402/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1403 */
1404void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1405{
1406 int mtyp = RESET_MSG;
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001407 struct sk_buff *skb;
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001408
1409 if (l->state == LINK_ESTABLISHING)
1410 mtyp = ACTIVATE_MSG;
1411
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001412 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001413
1414 /* Inform peer that this endpoint is going down if applicable */
1415 skb = skb_peek_tail(xmitq);
1416 if (skb && (l->state == LINK_RESET))
1417 msg_set_peer_stopping(buf_msg(skb), 1);
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001418}
1419
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001420/* tipc_link_build_nack_msg: prepare link nack message for transmission
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001421 * Note that sending of broadcast NACK is coordinated among nodes, to
1422 * reduce the risk of NACK storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001423 */
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001424static int tipc_link_build_nack_msg(struct tipc_link *l,
1425 struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001426{
1427 u32 def_cnt = ++l->stats.deferred_recv;
Tuong Lien382f5982019-04-04 11:09:52 +07001428 u32 defq_len = skb_queue_len(&l->deferdq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001429 int match1, match2;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001430
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001431 if (link_is_bc_rcvlink(l)) {
1432 match1 = def_cnt & 0xf;
1433 match2 = tipc_own_addr(l->net) & 0xf;
1434 if (match1 == match2)
1435 return TIPC_LINK_SND_STATE;
1436 return 0;
1437 }
Jon Paul Maloy52666982015-10-22 08:51:41 -04001438
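	/* Illustration (derived from the test below): a NACK goes out
	 * when the deferred queue holds 3, 19, 35, ... packets, i.e.
	 * once at queue length 3 and then every 16th deferral, to limit
	 * the NACK rate towards the sender
	 */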
Tuong Lien382f5982019-04-04 11:09:52 +07001439 if (defq_len >= 3 && !((defq_len - 3) % 16))
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001440 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001441 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001442}
1443
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001444/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001445 * @l: the link that should handle the message
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001446 * @skb: TIPC packet
1447 * @xmitq: queue to place packets to be sent after this call
1448 */
1449int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1450 struct sk_buff_head *xmitq)
1451{
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001452 struct sk_buff_head *defq = &l->deferdq;
Tuong Lien382f5982019-04-04 11:09:52 +07001453 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001454 u16 seqno, rcv_nxt, win_lim;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001455 int rc = 0;
1456
Tuong Lien382f5982019-04-04 11:09:52 +07001457 /* Verify and update link state */
1458 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1459 return tipc_link_proto_rcv(l, skb, xmitq);
1460
1461 /* Don't send probe at next timeout expiration */
1462 l->silent_intv_cnt = 0;
1463
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001464 do {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001465 hdr = buf_msg(skb);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001466 seqno = msg_seqno(hdr);
1467 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001468 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001469
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001470 if (unlikely(!link_is_up(l))) {
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001471 if (l->state == LINK_ESTABLISHING)
1472 rc = TIPC_LINK_UP_EVT;
1473 goto drop;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001474 }
1475
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001476 /* Drop if outside receive window */
1477 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1478 l->stats.duplicates++;
1479 goto drop;
1480 }
1481
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001482 /* Forward queues and wake up waiting users */
1483 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
Jon Maloya4dc70d2018-07-06 15:22:36 +02001484 l->stale_cnt = 0;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001485 tipc_link_advance_backlog(l, xmitq);
1486 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1487 link_prepare_wakeup(l);
1488 }
1489
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001490 /* Defer delivery if sequence gap */
1491 if (unlikely(seqno != rcv_nxt)) {
Jon Paul Maloy8306f992015-10-15 14:52:43 -04001492 __tipc_skb_queue_sorted(defq, seqno, skb);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001493 rc |= tipc_link_build_nack_msg(l, xmitq);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001494 break;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001495 }
1496
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001497 /* Deliver packet */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001498 l->rcv_nxt++;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001499 l->stats.recv_pkts++;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001500
1501 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1502 rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1503 else if (!tipc_data_input(l, skb, l->inputq))
1504 rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001505 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001506 rc |= tipc_link_build_state_msg(l, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001507 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
Jon Paul Maloy52666982015-10-22 08:51:41 -04001508 break;
Tuong Lien382f5982019-04-04 11:09:52 +07001509 } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001510
1511 return rc;
1512drop:
1513 kfree_skb(skb);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001514 return rc;
1515}
1516
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001517static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001518 bool probe_reply, u16 rcvgap,
1519 int tolerance, int priority,
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001520 struct sk_buff_head *xmitq)
1521{
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001522 struct tipc_link *bcl = l->bc_rcvlink;
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001523 struct sk_buff *skb;
1524 struct tipc_msg *hdr;
1525 struct sk_buff_head *dfq = &l->deferdq;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001526 bool node_up = link_is_up(bcl);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001527 struct tipc_mon_state *mstate = &l->mon_state;
1528 int dlen = 0;
1529 void *data;
Tuong Lien91959482019-04-04 11:09:51 +07001530 u16 glen = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001531
1532 /* Don't send protocol message during reset or link failover */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001533 if (tipc_link_is_blocked(l))
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001534 return;
1535
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001536 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1537 return;
1538
1539 if (!skb_queue_empty(dfq))
1540 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1541
1542 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
Tuong Lien91959482019-04-04 11:09:51 +07001543 tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1544 l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001545 if (!skb)
1546 return;
1547
1548 hdr = buf_msg(skb);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001549 data = msg_data(hdr);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001550 msg_set_session(hdr, l->session);
1551 msg_set_bearer_id(hdr, l->bearer_id);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001552 msg_set_net_plane(hdr, l->net_plane);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001553 msg_set_next_sent(hdr, l->snd_nxt);
1554 msg_set_ack(hdr, l->rcv_nxt - 1);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001555 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001556 msg_set_bc_ack_invalid(hdr, !node_up);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001557 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001558 msg_set_link_tolerance(hdr, tolerance);
1559 msg_set_linkprio(hdr, priority);
1560 msg_set_redundant_link(hdr, node_up);
1561 msg_set_seq_gap(hdr, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001562 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001563
1564 if (mtyp == STATE_MSG) {
Jon Maloy9012de52018-07-10 01:07:35 +02001565 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1566 msg_set_seqno(hdr, l->snd_nxt_state++);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001567 msg_set_seq_gap(hdr, rcvgap);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001568 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001569 msg_set_probe(hdr, probe);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001570 msg_set_is_keepalive(hdr, probe || probe_reply);
Tuong Lien91959482019-04-04 11:09:51 +07001571 if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1572 glen = tipc_build_gap_ack_blks(l, data);
1573 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1574 msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1575 skb_trim(skb, INT_H_SIZE + glen + dlen);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001576 l->stats.sent_states++;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001577 l->rcv_unacked = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001578 } else {
1579 /* RESET_MSG or ACTIVATE_MSG */
Tuong Lien91986ee2019-02-11 13:29:43 +07001580 if (mtyp == ACTIVATE_MSG) {
1581 msg_set_dest_session_valid(hdr, 1);
1582 msg_set_dest_session(hdr, l->peer_session);
1583 }
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001584 msg_set_max_pkt(hdr, l->advertised_mtu);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001585 strcpy(data, l->if_name);
1586 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1587 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001588 }
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001589 if (probe)
1590 l->stats.sent_probes++;
1591 if (rcvgap)
1592 l->stats.sent_nacks++;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001593 skb->priority = TC_PRIO_CONTROL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001594 __skb_queue_tail(xmitq, skb);
Tuong Lien26574db2018-12-19 09:17:57 +07001595 trace_tipc_proto_build(skb, false, l->name);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001596}
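/* Layout note: in a STATE message built above, the data area holds the
 * Gap ACK blocks (glen bytes, present only if the peer has the
 * TIPC_GAP_ACK_BLOCK capability) followed by the monitor data (dlen
 * bytes); the receive side in tipc_link_proto_rcv() parses and skips
 * glen accordingly.
 */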
Per Lidenb97bf3f2006-01-02 19:04:38 +01001597
LUU Duc Canhc140eb12018-09-26 21:00:54 +02001598void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1599 struct sk_buff_head *xmitq)
1600{
1601 u32 onode = tipc_own_addr(l->net);
1602 struct tipc_msg *hdr, *ihdr;
1603 struct sk_buff_head tnlq;
1604 struct sk_buff *skb;
1605 u32 dnode = l->addr;
1606
1607 skb_queue_head_init(&tnlq);
1608 skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1609 INT_H_SIZE, BASIC_H_SIZE,
1610 dnode, onode, 0, 0, 0);
1611 if (!skb) {
1612 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1613 return;
1614 }
1615
1616 hdr = buf_msg(skb);
1617 msg_set_msgcnt(hdr, 1);
1618 msg_set_bearer_id(hdr, l->peer_bearer_id);
1619
1620 ihdr = (struct tipc_msg *)msg_data(hdr);
1621 tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1622 BASIC_H_SIZE, dnode);
1623 msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1624 __skb_queue_tail(&tnlq, skb);
1625 tipc_link_xmit(l, &tnlq, xmitq);
1626}
1627
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001628/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001629 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001630 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001631void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1632 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001633{
Tuong Lien58ee86b2019-04-04 11:09:53 +07001634 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001635 struct sk_buff *skb, *tnlskb;
1636 struct tipc_msg *hdr, tnlhdr;
1637 struct sk_buff_head *queue = &l->transmq;
1638 struct sk_buff_head tmpxq, tnlq;
1639 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001640
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001641 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001642 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001643
1644 skb_queue_head_init(&tnlq);
1645 skb_queue_head_init(&tmpxq);
1646
1647 /* At least one packet required for safe algorithm => add dummy */
1648 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001649 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001650 0, 0, TIPC_ERR_NO_PORT);
Ying Xuea6ca1092014-11-26 11:41:55 +08001651 if (!skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001652 pr_warn("%sunable to create tunnel packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001653 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001654 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001655 skb_queue_tail(&tnlq, skb);
1656 tipc_link_xmit(l, &tnlq, &tmpxq);
1657 __skb_queue_purge(&tmpxq);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001658
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001659 /* Initialize reusable tunnel packet header */
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001660 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001661 mtyp, INT_H_SIZE, l->addr);
Tuong Lien58ee86b2019-04-04 11:09:53 +07001662 if (mtyp == SYNCH_MSG)
1663 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1664 else
1665 pktcnt = skb_queue_len(&l->transmq);
1666 pktcnt += skb_queue_len(&l->backlogq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001667 msg_set_msgcnt(&tnlhdr, pktcnt);
1668 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1669tnl:
1670 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001671 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001672 hdr = buf_msg(skb);
1673 if (queue == &l->backlogq)
1674 msg_set_seqno(hdr, seqno++);
1675 pktlen = msg_size(hdr);
1676 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
Parthasarathy Bhuvaragan57d5f642017-01-13 15:46:25 +01001677 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001678 if (!tnlskb) {
1679 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001680 return;
1681 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001682 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1683 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1684 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001685 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001686 if (queue != &l->backlogq) {
1687 queue = &l->backlogq;
1688 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001689 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001690
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001691 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001692
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001693 if (mtyp == FAILOVER_MSG) {
1694 tnl->drop_point = l->rcv_nxt;
1695 tnl->failover_reasm_skb = l->reasm_buf;
1696 l->reasm_buf = NULL;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001697
1698 /* Failover the link's deferdq */
1699 if (unlikely(!skb_queue_empty(fdefq))) {
1700 pr_warn("Link failover deferdq not empty: %d!\n",
1701 skb_queue_len(fdefq));
1702 __skb_queue_purge(fdefq);
1703 }
1704 skb_queue_splice_init(&l->deferdq, fdefq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001705 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001706}
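/* Example (illustrative only): after the dummy above has joined the old
 * link's queues, assume transmq holds seqnos 10-13 and backlogq two
 * more packets. A FAILOVER_MSG run then builds six tunnel packets,
 * each carrying msgcnt = 6 and one original packet (header plus data)
 * as payload; the backlog packets are assigned seqnos 14-15 while
 * being wrapped.
 */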
1707
Tuong Lienc0b14a082019-05-02 17:23:23 +07001708/**
1709 * tipc_link_failover_prepare() - prepare tnl for link failover
1710 *
1711 * This is a special version of its precursor, tipc_link_tnl_prepare();
1712 * see tipc_node_link_failover() for details
1713 *
1714 * @l: failover link
1715 * @tnl: tunnel link
1716 * @xmitq: queue for messages to be xmited
1717 */
1718void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1719 struct sk_buff_head *xmitq)
1720{
1721 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1722
1723 tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1724
1725	/* This failover link endpoint was never established before,
1726 * so it has not received anything from peer.
1727 * Otherwise, it must be a normal failover situation or the
1728 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1729 * would have to start over from scratch instead.
1730 */
1731 WARN_ON(l && tipc_link_is_up(l));
1732 tnl->drop_point = 1;
1733 tnl->failover_reasm_skb = NULL;
1734
1735	/* Initialize the tunnel link's failover deferdq */
1736 if (unlikely(!skb_queue_empty(fdefq))) {
1737 pr_warn("Link failover deferdq not empty: %d!\n",
1738 skb_queue_len(fdefq));
1739 __skb_queue_purge(fdefq);
1740 }
1741}
1742
Jon Maloy7ea817f2018-07-10 01:07:36 +02001743/* tipc_link_validate_msg(): validate message against current link state
1744 * Returns true if message should be accepted, otherwise false
1745 */
1746bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1747{
1748 u16 curr_session = l->peer_session;
1749 u16 session = msg_session(hdr);
1750 int mtyp = msg_type(hdr);
1751
1752 if (msg_user(hdr) != LINK_PROTOCOL)
1753 return true;
1754
1755 switch (mtyp) {
1756 case RESET_MSG:
1757 if (!l->in_session)
1758 return true;
1759 /* Accept only RESET with new session number */
1760 return more(session, curr_session);
1761 case ACTIVATE_MSG:
1762 if (!l->in_session)
1763 return true;
1764 /* Accept only ACTIVATE with new or current session number */
1765 return !less(session, curr_session);
1766 case STATE_MSG:
1767 /* Accept only STATE with current session number */
1768 if (!l->in_session)
1769 return false;
1770 if (session != curr_session)
1771 return false;
LUU Duc Canhd949cfe2018-09-26 22:28:52 +02001772 /* Extra sanity check */
1773 if (!link_is_up(l) && msg_ack(hdr))
1774 return false;
Jon Maloy7ea817f2018-07-10 01:07:36 +02001775 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1776 return true;
1777 /* Accept only STATE with new sequence number */
1778 return !less(msg_seqno(hdr), l->rcv_nxt_state);
1779 default:
1780 return false;
1781 }
1782}
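/* Example (illustrative only): with l->in_session set and
 * l->peer_session == 1002, a RESET with session 1003 is accepted (new
 * session) while a retransmitted RESET with 1002 is rejected; an
 * ACTIVATE with session 1002 is still accepted, since it may belong to
 * the ongoing session establishment.
 */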
1783
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001784/* tipc_link_proto_rcv(): receive link-level protocol message:
1785 * Note that network plane id propagates through the network, and may
1786 * change at any time. The node with lowest numerical id determines
1787 * network plane
1788 */
1789static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1790 struct sk_buff_head *xmitq)
1791{
1792 struct tipc_msg *hdr = buf_msg(skb);
Tuong Lien91959482019-04-04 11:09:51 +07001793 struct tipc_gap_ack_blks *ga = NULL;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001794 u16 rcvgap = 0;
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001795 u16 ack = msg_ack(hdr);
1796 u16 gap = msg_seq_gap(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001797 u16 peers_snd_nxt = msg_next_sent(hdr);
1798 u16 peers_tol = msg_link_tolerance(hdr);
1799 u16 peers_prio = msg_linkprio(hdr);
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001800 u16 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001801 u16 dlen = msg_data_sz(hdr);
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001802 int mtyp = msg_type(hdr);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001803 bool reply = msg_probe(hdr);
Tuong Lien91959482019-04-04 11:09:51 +07001804 u16 glen = 0;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001805 void *data;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001806 char *if_name;
1807 int rc = 0;
1808
Tuong Lien26574db2018-12-19 09:17:57 +07001809 trace_tipc_proto_rcv(skb, false, l->name);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001810 if (tipc_link_is_blocked(l) || !xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001811 goto exit;
1812
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001813 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001814 l->net_plane = msg_net_plane(hdr);
1815
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001816 skb_linearize(skb);
1817 hdr = buf_msg(skb);
1818 data = msg_data(hdr);
1819
Tuong Lien26574db2018-12-19 09:17:57 +07001820 if (!tipc_link_validate_msg(l, hdr)) {
1821 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1822 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
Jon Maloy7ea817f2018-07-10 01:07:36 +02001823 goto exit;
Tuong Lien26574db2018-12-19 09:17:57 +07001824 }
Jon Maloy7ea817f2018-07-10 01:07:36 +02001825
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001826 switch (mtyp) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001827 case RESET_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001828 case ACTIVATE_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001829 /* Complete own link name with peer's interface name */
1830 if_name = strrchr(l->name, ':') + 1;
1831 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1832 break;
1833 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1834 break;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001835 strncpy(if_name, data, TIPC_MAX_IF_NAME);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001836
1837 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02001838 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001839 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02001840 l->bc_rcvlink->tolerance = peers_tol;
1841 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001842 /* Update own priority if peer's priority is higher */
1843 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1844 l->priority = peers_prio;
1845
Jon Maloy7ab412d2018-11-10 17:30:24 -05001846 /* If peer is going down we want full re-establish cycle */
1847 if (msg_peer_stopping(hdr)) {
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001848 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Maloy7ab412d2018-11-10 17:30:24 -05001849 break;
1850 }
Tuong Lien91986ee2019-02-11 13:29:43 +07001851
1852 /* If this endpoint was re-created while peer was ESTABLISHING
1853 * it doesn't know current session number. Force re-synch.
1854 */
1855 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
1856 l->session != msg_dest_session(hdr)) {
1857 if (less(l->session, msg_dest_session(hdr)))
1858 l->session = msg_dest_session(hdr) + 1;
1859 break;
1860 }
1861
Jon Maloy7ab412d2018-11-10 17:30:24 -05001862 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1863 if (mtyp == RESET_MSG || !link_is_up(l))
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001864 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1865
1866 /* ACTIVATE_MSG takes up link if it was already locally reset */
Jon Maloy7ab412d2018-11-10 17:30:24 -05001867 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001868 rc = TIPC_LINK_UP_EVT;
1869
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001870 l->peer_session = msg_session(hdr);
Jon Maloy7ea817f2018-07-10 01:07:36 +02001871 l->in_session = true;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001872 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001873 if (l->mtu > msg_max_pkt(hdr))
1874 l->mtu = msg_max_pkt(hdr);
1875 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001876
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001877 case STATE_MSG:
Jon Maloy9012de52018-07-10 01:07:35 +02001878 l->rcv_nxt_state = msg_seqno(hdr) + 1;
1879
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001880 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02001881 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001882 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02001883 l->bc_rcvlink->tolerance = peers_tol;
1884 }
Jon Paul Maloyf7967552016-11-23 21:05:26 -05001885 /* Update own prio if peer indicates a different value */
1886 if ((peers_prio != l->priority) &&
1887 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
Richard Alpe81729812016-02-01 08:19:57 +01001888 l->priority = peers_prio;
1889 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1890 }
1891
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001892 l->silent_intv_cnt = 0;
1893 l->stats.recv_states++;
1894 if (msg_probe(hdr))
1895 l->stats.recv_probes++;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001896
1897 if (!link_is_up(l)) {
1898 if (l->state == LINK_ESTABLISHING)
1899 rc = TIPC_LINK_UP_EVT;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001900 break;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001901 }
Tuong Lien91959482019-04-04 11:09:51 +07001902
1903 /* Receive Gap ACK blocks from peer if any */
1904 if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
1905 ga = (struct tipc_gap_ack_blks *)data;
1906 glen = ntohs(ga->len);
1907 /* sanity check: if failed, ignore Gap ACK blocks */
1908 if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
1909 ga = NULL;
1910 }
1911
1912 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001913 &l->mon_state, l->bearer_id);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001914
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001915 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001916 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001917 rcvgap = peers_snd_nxt - l->rcv_nxt;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001918 if (rcvgap || reply)
1919 tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
1920 rcvgap, 0, 0, xmitq);
Tuong Lien91959482019-04-04 11:09:51 +07001921
1922 tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001923
1924 /* If NACK, retransmit will now start at right position */
Tuong Lien91959482019-04-04 11:09:51 +07001925 if (gap)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001926 l->stats.recv_nacks++;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001927
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001928 tipc_link_advance_backlog(l, xmitq);
1929 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1930 link_prepare_wakeup(l);
1931 }
1932exit:
1933 kfree_skb(skb);
1934 return rc;
1935}
1936
Jon Paul Maloy52666982015-10-22 08:51:41 -04001937/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1938 */
1939static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1940 u16 peers_snd_nxt,
1941 struct sk_buff_head *xmitq)
1942{
1943 struct sk_buff *skb;
1944 struct tipc_msg *hdr;
1945 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1946 u16 ack = l->rcv_nxt - 1;
1947 u16 gap_to = peers_snd_nxt - 1;
1948
1949 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001950 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001951 if (!skb)
1952 return false;
1953 hdr = buf_msg(skb);
1954 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1955 msg_set_bcast_ack(hdr, ack);
1956 msg_set_bcgap_after(hdr, ack);
1957 if (dfrd_skb)
1958 gap_to = buf_seqno(dfrd_skb) - 1;
1959 msg_set_bcgap_to(hdr, gap_to);
1960 msg_set_non_seq(hdr, bcast);
1961 __skb_queue_tail(xmitq, skb);
1962 return true;
1963}
1964
1965/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1966 *
1967 * Give a newly added peer node the sequence number where it should
1968 * start receiving and acking broadcast packets.
1969 */
Wu Fengguang742e0382015-10-24 22:56:01 +08001970static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1971 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001972{
1973 struct sk_buff_head list;
1974
1975 __skb_queue_head_init(&list);
1976 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1977 return;
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001978 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001979 tipc_link_xmit(l, &list, xmitq);
1980}
1981
1982/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1983 */
1984void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1985{
1986 int mtyp = msg_type(hdr);
1987 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1988
1989 if (link_is_up(l))
1990 return;
1991
1992 if (msg_user(hdr) == BCAST_PROTOCOL) {
1993 l->rcv_nxt = peers_snd_nxt;
1994 l->state = LINK_ESTABLISHED;
1995 return;
1996 }
1997
1998 if (l->peer_caps & TIPC_BCAST_SYNCH)
1999 return;
2000
2001 if (msg_peer_node_is_up(hdr))
2002 return;
2003
2004 /* Compatibility: accept older, less safe initial synch data */
2005 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2006 l->rcv_nxt = peers_snd_nxt;
2007}
2008
2009/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2010 */
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002011int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2012 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04002013{
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04002014 struct tipc_link *snd_l = l->bc_sndlink;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002015 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002016 u16 from = msg_bcast_ack(hdr) + 1;
2017 u16 to = from + msg_bc_gap(hdr) - 1;
2018 int rc = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002019
2020 if (!link_is_up(l))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002021 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002022
2023 if (!msg_peer_node_is_up(hdr))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002024 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002025
Jon Paul Maloy2d18ac42016-07-11 16:08:35 -04002026	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2027 if (msg_ack(hdr))
2028 l->bc_peer_is_up = true;
2029
2030 if (!l->bc_peer_is_up)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002031 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002032
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04002033 l->stats.recv_nacks++;
2034
Jon Paul Maloy52666982015-10-22 08:51:41 -04002035 /* Ignore if peers_snd_nxt goes beyond receive window */
2036 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002037 return rc;
2038
LUU Duc Canh31c4f4c2018-11-10 14:23:50 -05002039 rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002040
2041 l->snd_nxt = peers_snd_nxt;
2042 if (link_bc_rcv_gap(l))
2043 rc |= TIPC_LINK_SND_STATE;
2044
2045 /* Return now if sender supports nack via STATE messages */
2046 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2047 return rc;
2048
2049 /* Otherwise, be backwards compatible */
Jon Paul Maloy52666982015-10-22 08:51:41 -04002050
2051 if (!more(peers_snd_nxt, l->rcv_nxt)) {
2052 l->nack_state = BC_NACK_SND_CONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002053 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002054 }
2055
2056 /* Don't NACK if one was recently sent or peeked */
2057 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2058 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002059 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002060 }
2061
2062 /* Conditionally delay NACK sending until next synch rcv */
2063 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2064 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2065 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002066 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002067 }
2068
2069 /* Send NACK now but suppress next one */
2070 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2071 l->nack_state = BC_NACK_SND_SUPPRESS;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002072 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002073}
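/* Summary of the legacy nack_state machine above: CONDITIONAL is set
 * while the peer is not ahead of us; SUPPRESS mutes exactly one NACK
 * right after one was sent; CONDITIONAL is promoted to UNCONDITIONAL
 * but only results in a NACK once the gap has reached
 * TIPC_MIN_LINK_WIN; every NACK actually sent re-arms SUPPRESS.
 */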
2074
2075void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2076 struct sk_buff_head *xmitq)
2077{
2078 struct sk_buff *skb, *tmp;
2079 struct tipc_link *snd_l = l->bc_sndlink;
2080
2081 if (!link_is_up(l) || !l->bc_peer_is_up)
2082 return;
2083
2084 if (!more(acked, l->acked))
2085 return;
2086
Tuong Lien26574db2018-12-19 09:17:57 +07002087 trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002088 /* Skip over packets peer has already acked */
2089 skb_queue_walk(&snd_l->transmq, skb) {
2090 if (more(buf_seqno(skb), l->acked))
2091 break;
2092 }
2093
2094 /* Update/release the packets peer is acking now */
2095 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2096 if (more(buf_seqno(skb), acked))
2097 break;
2098 if (!--TIPC_SKB_CB(skb)->ackers) {
2099 __skb_unlink(skb, &snd_l->transmq);
2100 kfree_skb(skb);
2101 }
2102 }
2103 l->acked = acked;
2104 tipc_link_advance_backlog(snd_l, xmitq);
2105 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2106 link_prepare_wakeup(snd_l);
2107}
2108
2109/* tipc_link_bc_nack_rcv(): receive broadcast nack message
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002110 * This function is here for backwards compatibility, since
2111 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
Jon Paul Maloy52666982015-10-22 08:51:41 -04002112 */
2113int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2114 struct sk_buff_head *xmitq)
2115{
2116 struct tipc_msg *hdr = buf_msg(skb);
2117 u32 dnode = msg_destnode(hdr);
2118 int mtyp = msg_type(hdr);
2119 u16 acked = msg_bcast_ack(hdr);
2120 u16 from = acked + 1;
2121 u16 to = msg_bcgap_to(hdr);
2122 u16 peers_snd_nxt = to + 1;
2123 int rc = 0;
2124
2125 kfree_skb(skb);
2126
2127 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2128 return 0;
2129
2130 if (mtyp != STATE_MSG)
2131 return 0;
2132
Jon Paul Maloye74a3862016-03-03 14:23:21 -05002133 if (dnode == tipc_own_addr(l->net)) {
Jon Paul Maloy52666982015-10-22 08:51:41 -04002134 tipc_link_bc_ack_rcv(l, acked, xmitq);
Jon Paul Maloy40501f902017-08-21 17:59:30 +02002135 rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002136 l->stats.recv_nacks++;
2137 return rc;
2138 }
2139
2140 /* Msg for other node => suppress own NACK at next sync if applicable */
2141 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2142 l->nack_state = BC_NACK_SND_SUPPRESS;
2143
2144 return 0;
2145}
2146
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04002147void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002148{
Jon Maloy218527f2018-03-29 23:20:41 +02002149 int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04002150
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04002151 l->window = win;
Jon Paul Maloy5a0950c2016-08-16 11:53:51 -04002152 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
2153 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
2154 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
2155 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04002156 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002157}
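/* Example (illustrative only): with win = 50 the backlog limits above
 * become 50/100/150/200 packets for LOW through CRITICAL importance,
 * while SYSTEM importance is capped at max_bulk, i.e. the number of
 * packets needed to carry a full name table bulk distribution
 * (TIPC_MAX_PUBL items of ITEM_SIZE bytes each, packed per MTU).
 */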
2158
Allan Stephens5c216e12011-10-18 11:34:29 -04002159/**
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002160 * tipc_link_reset_stats - reset link statistics
Jon Paul Maloy1a906322015-11-19 14:30:47 -05002161 * @l: pointer to link
Per Lidenb97bf3f2006-01-02 19:04:38 +01002162 */
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002163void tipc_link_reset_stats(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002164{
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002165 memset(&l->stats, 0, sizeof(l->stats));
Per Lidenb97bf3f2006-01-02 19:04:38 +01002166}
2167
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002168static void link_print(struct tipc_link *l, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002169{
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002170 struct sk_buff *hskb = skb_peek(&l->transmq);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04002171 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002172 u16 tail = l->snd_nxt - 1;
Ying Xue7a2f7d12014-04-21 10:55:46 +08002173
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002174 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002175 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2176 skb_queue_len(&l->transmq), head, tail,
2177 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002178}
Richard Alpe0655f6a2014-11-20 10:29:07 +01002179
2180/* Parse and validate nested (link) properties valid for media, bearer and link
2181 */
2182int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2183{
2184 int err;
2185
Johannes Berg8cb08172019-04-26 14:07:28 +02002186 err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2187 tipc_nl_prop_policy, NULL);
Richard Alpe0655f6a2014-11-20 10:29:07 +01002188 if (err)
2189 return err;
2190
2191 if (props[TIPC_NLA_PROP_PRIO]) {
2192 u32 prio;
2193
2194 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2195 if (prio > TIPC_MAX_LINK_PRI)
2196 return -EINVAL;
2197 }
2198
2199 if (props[TIPC_NLA_PROP_TOL]) {
2200 u32 tol;
2201
2202 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2203 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2204 return -EINVAL;
2205 }
2206
2207 if (props[TIPC_NLA_PROP_WIN]) {
2208 u32 win;
2209
2210 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2211 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2212 return -EINVAL;
2213 }
2214
2215 return 0;
2216}
Richard Alpe7be57fc2014-11-20 10:29:12 +01002217
Richard Alped8182802014-11-24 11:10:29 +01002218static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002219{
2220 int i;
2221 struct nlattr *stats;
2222
2223 struct nla_map {
2224 u32 key;
2225 u32 val;
2226 };
2227
2228 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05002229 {TIPC_NLA_STATS_RX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01002230 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2231 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2232 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2233 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05002234 {TIPC_NLA_STATS_TX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01002235 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2236 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2237 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2238 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2239 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2240 s->msg_length_counts : 1},
2241 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2242 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2243 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2244 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2245 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2246 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2247 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2248 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2249 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2250 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
2251 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2252 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2253 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2254 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
2255 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2256 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2257 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2258 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2259 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2260 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2261 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2262 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2263 (s->accu_queue_sz / s->queue_sz_counts) : 0}
2264 };
2265
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002266 stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002267 if (!stats)
2268 return -EMSGSIZE;
2269
2270 for (i = 0; i < ARRAY_SIZE(map); i++)
2271 if (nla_put_u32(skb, map[i].key, map[i].val))
2272 goto msg_full;
2273
2274 nla_nest_end(skb, stats);
2275
2276 return 0;
2277msg_full:
2278 nla_nest_cancel(skb, stats);
2279
2280 return -EMSGSIZE;
2281}
2282
2283/* Caller should hold appropriate locks to protect the link */
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05002284int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2285 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002286{
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002287 u32 self = tipc_own_addr(net);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002288 struct nlattr *attrs;
2289 struct nlattr *prop;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002290 void *hdr;
2291 int err;
Richard Alpe7be57fc2014-11-20 10:29:12 +01002292
Richard Alpebfb3e5d2015-02-09 09:50:03 +01002293 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02002294 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002295 if (!hdr)
2296 return -EMSGSIZE;
2297
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002298 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002299 if (!attrs)
2300 goto msg_full;
2301
2302 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2303 goto attr_msg_full;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002304 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002305 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04002306 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002307 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002308 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002309 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002310 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002311 goto attr_msg_full;
2312
2313 if (tipc_link_is_up(link))
2314 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2315 goto attr_msg_full;
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04002316 if (link->active)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002317 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2318 goto attr_msg_full;
2319
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002320 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002321 if (!prop)
2322 goto attr_msg_full;
2323 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2324 goto prop_msg_full;
2325 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2326 goto prop_msg_full;
2327 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04002328 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002329 goto prop_msg_full;
2330 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2331 goto prop_msg_full;
2332 nla_nest_end(msg->skb, prop);
2333
2334 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2335 if (err)
2336 goto attr_msg_full;
2337
2338 nla_nest_end(msg->skb, attrs);
2339 genlmsg_end(msg->skb, hdr);
2340
2341 return 0;
2342
2343prop_msg_full:
2344 nla_nest_cancel(msg->skb, prop);
2345attr_msg_full:
2346 nla_nest_cancel(msg->skb, attrs);
2347msg_full:
2348 genlmsg_cancel(msg->skb, hdr);
2349
2350 return -EMSGSIZE;
2351}
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002352
2353static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2354 struct tipc_stats *stats)
2355{
2356 int i;
2357 struct nlattr *nest;
2358
2359 struct nla_map {
2360 __u32 key;
2361 __u32 val;
2362 };
2363
2364 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05002365 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002366 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2367 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2368 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2369 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05002370 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002371 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2372 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2373 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2374 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2375 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2376 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2377 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2378 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2379 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2380 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2381 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2382 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2383 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2384 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2385 };
2386
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002387 nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002388 if (!nest)
2389 return -EMSGSIZE;
2390
2391 for (i = 0; i < ARRAY_SIZE(map); i++)
2392 if (nla_put_u32(skb, map[i].key, map[i].val))
2393 goto msg_full;
2394
2395 nla_nest_end(skb, nest);
2396
2397 return 0;
2398msg_full:
2399 nla_nest_cancel(skb, nest);
2400
2401 return -EMSGSIZE;
2402}
2403
2404int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2405{
2406 int err;
2407 void *hdr;
2408 struct nlattr *attrs;
2409 struct nlattr *prop;
2410 struct tipc_net *tn = net_generic(net, tipc_net_id);
Hoang Le02ec6ca2019-03-19 18:49:48 +07002411 u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
2412 u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002413 struct tipc_link *bcl = tn->bcl;
2414
2415 if (!bcl)
2416 return 0;
2417
2418 tipc_bcast_lock(net);
2419
2420 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2421 NLM_F_MULTI, TIPC_NL_LINK_GET);
Insu Yunb53ce3e2016-02-17 11:47:35 -05002422 if (!hdr) {
2423 tipc_bcast_unlock(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002424 return -EMSGSIZE;
Insu Yunb53ce3e2016-02-17 11:47:35 -05002425 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002426
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002427 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002428 if (!attrs)
2429 goto msg_full;
2430
2431 /* The broadcast link is always up */
2432 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2433 goto attr_msg_full;
2434
2435 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2436 goto attr_msg_full;
2437 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2438 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002439 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002440 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002441 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002442 goto attr_msg_full;
2443
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002444 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002445 if (!prop)
2446 goto attr_msg_full;
2447 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2448 goto prop_msg_full;
Hoang Le02ec6ca2019-03-19 18:49:48 +07002449 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2450 goto prop_msg_full;
2451 if (bc_mode & BCLINK_MODE_SEL)
2452 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2453 bc_ratio))
2454 goto prop_msg_full;
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002455 nla_nest_end(msg->skb, prop);
2456
2457 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2458 if (err)
2459 goto attr_msg_full;
2460
2461 tipc_bcast_unlock(net);
2462 nla_nest_end(msg->skb, attrs);
2463 genlmsg_end(msg->skb, hdr);
2464
2465 return 0;
2466
2467prop_msg_full:
2468 nla_nest_cancel(msg->skb, prop);
2469attr_msg_full:
2470 nla_nest_cancel(msg->skb, attrs);
2471msg_full:
2472 tipc_bcast_unlock(net);
2473 genlmsg_cancel(msg->skb, hdr);
2474
2475 return -EMSGSIZE;
2476}
2477
Richard Alped01332f2016-02-01 08:19:56 +01002478void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2479 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002480{
2481 l->tolerance = tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002482 if (l->bc_rcvlink)
2483 l->bc_rcvlink->tolerance = tol;
Jon Maloy37c64cf2018-02-14 13:34:39 +01002484 if (link_is_up(l))
2485 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002486}
2487
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

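/*
 * A usage sketch (not part of the original file): both setters above
 * only queue a STATE_MSG on @xmitq; the caller is expected to hand
 * that queue to the bearer afterwards, roughly as the netlink link-set
 * path does. The net/bearer_id/maddr values are assumptions here and
 * would come from the owning node in real callers.
 */
#if 0
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_set_tolerance(l, 1500, &xmitq);	/* tolerance in ms */
	tipc_link_set_prio(l, TIPC_DEF_LINK_PRI, &xmitq);
	tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
#endif
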
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}

char *tipc_link_name_ext(struct tipc_link *l, char *buf)
{
	if (!l)
		scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
	else if (link_is_bc_sndlink(l))
		scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
	else if (link_is_bc_rcvlink(l))
		scnprintf(buf, TIPC_MAX_LINK_NAME,
			  "broadcast-receiver, peer %x", l->addr);
	else
		memcpy(buf, l->name, TIPC_MAX_LINK_NAME);

	return buf;
}

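/*
 * A usage sketch (not part of the original file): callers, e.g. the
 * trace code, pass a TIPC_MAX_LINK_NAME-sized buffer; since the same
 * buffer is returned, the call can be embedded in a format argument.
 */
#if 0
	char buf[TIPC_MAX_LINK_NAME];

	pr_info("resetting link %s\n", tipc_link_name_ext(l, buf));
#endif
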
/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask selecting which link queues to dump:
 * - TIPC_DUMP_NONE: don't dump any link queue
 * - TIPC_DUMP_TRANSMQ: dump the link transmq queue
 * - TIPC_DUMP_BACKLOGQ: dump the link backlog queue
 * - TIPC_DUMP_DEFERDQ: dump the link deferred queue (deferdq)
 * - TIPC_DUMP_INPUTQ: dump the link input queue
 * - TIPC_DUMP_WAKEUP: dump the link wakeup queue
 * - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: buffer the dump data is written to, in string format
 *
 * Return: the number of characters written to @buf
 */
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	/* Summary line: address, FSM state, session and sequence state */
	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
	i += scnprintf(buf + i, sz - i, " %u", l->stale_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	/* One "| <len> <head seqno> <tail seqno>" triple per queue */
	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	/* Full contents, only for the queues selected in @dqueues */
	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}
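
/*
 * A usage sketch (not part of the original file): dump a link and all
 * of its queues into a LINK_LMAX-sized buffer and print it. The caller
 * must ensure @l cannot be freed while the dump runs.
 */
#if 0
	char *buf = kmalloc(LINK_LMAX, GFP_ATOMIC);

	if (buf) {
		tipc_link_dump(l, TIPC_DUMP_ALL, buf);
		pr_info("%s", buf);
		kfree(buf);
	}
#endif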