blob: 2a3e9bcc1e30a3bb0c0c60c67ac3a42234fb5a3f [file] [log] [blame]
tuexendd729232011-11-01 23:04:43 +00001/*-
Michael Tuexen866a7312017-11-24 12:44:05 +01002 * SPDX-License-Identifier: BSD-3-Clause
3 *
tuexendd729232011-11-01 23:04:43 +00004 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
tuexen194eae12012-05-23 12:03:48 +00005 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
tuexendd729232011-11-01 23:04:43 +00007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
tuexen9784e9a2011-12-18 13:04:23 +000012 * this list of conditions and the following disclaimer.
tuexendd729232011-11-01 23:04:43 +000013 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
tuexen9784e9a2011-12-18 13:04:23 +000016 * the documentation and/or other materials provided with the distribution.
tuexendd729232011-11-01 23:04:43 +000017 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
tuexendd729232011-11-01 23:04:43 +000035#ifdef __FreeBSD__
36#include <sys/cdefs.h>
Michael Tuexenedd369d2020-05-19 09:42:15 +020037__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 361243 2020-05-19 07:23:35Z tuexen $");
tuexendd729232011-11-01 23:04:43 +000038#endif
39
40#include <netinet/sctp_os.h>
Michael Tuexene5001952016-04-17 19:25:27 +020041#ifdef __FreeBSD__
42#include <sys/proc.h>
43#endif
tuexendd729232011-11-01 23:04:43 +000044#include <netinet/sctp_var.h>
45#include <netinet/sctp_sysctl.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020046#include <netinet/sctp_header.h>
Michael Tuexene5001952016-04-17 19:25:27 +020047#include <netinet/sctp_pcb.h>
tuexendd729232011-11-01 23:04:43 +000048#include <netinet/sctputil.h>
49#include <netinet/sctp_output.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020050#include <netinet/sctp_uio.h>
Michael Tuexene5001952016-04-17 19:25:27 +020051#include <netinet/sctp_auth.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020052#include <netinet/sctp_timer.h>
Michael Tuexene5001952016-04-17 19:25:27 +020053#include <netinet/sctp_asconf.h>
54#include <netinet/sctp_indata.h>
55#include <netinet/sctp_bsd_addr.h>
56#include <netinet/sctp_input.h>
57#include <netinet/sctp_crc32.h>
58#ifdef __FreeBSD__
59#include <netinet/sctp_lock_bsd.h>
60#endif
tuexendd729232011-11-01 23:04:43 +000061/*
62 * NOTES: On the outbound side of things I need to check the sack timer to
63 * see if I should generate a sack into the chunk queue (if I have data to
64 * send that is and will be sending it .. for bundling.
65 *
66 * The callback in sctp_usrreq.c will get called when the socket is read from.
67 * This will cause sctp_service_queues() to get called on the top entry in
68 * the list.
69 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +020070static uint32_t
Michael Tuexene5001952016-04-17 19:25:27 +020071sctp_add_chk_to_control(struct sctp_queued_to_read *control,
72 struct sctp_stream_in *strm,
73 struct sctp_tcb *stcb,
74 struct sctp_association *asoc,
Michael Tuexenfdcf7902016-08-06 14:39:31 +020075 struct sctp_tmit_chunk *chk, int lock_held);
Michael Tuexene5001952016-04-17 19:25:27 +020076
tuexendd729232011-11-01 23:04:43 +000077
78void
79sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
80{
81 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
82}
83
/*
 * Calculate what the advertised receive window (rwnd) for this
 * association would be right now.
 *
 * Returns 0 if no socket is attached or no buffer space is left;
 * returns the full socket-buffer limit when nothing at all is queued;
 * otherwise returns the remaining socket-buffer space minus what is
 * still held in the reassembly and per-stream queues and minus control
 * overhead, clamped up to 1 so silly-window-syndrome avoidance stays
 * engaged.
 */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket.
	 * Since the sb_cc is the count that everyone has put up, when we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		/* No socket attached: nothing can be received. */
		return (calc);
	}

	/* Queue counters and byte totals must be mutually consistent. */
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	        ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Nothing queued anywhere: full rwnd granted. */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* Get the actual space left in the socket receive buffer. */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue yet but is
	 * still held for putting up; each queued entry is also charged
	 * MSIZE of mbuf overhead on top of its payload bytes.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	                                         asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	                                         asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* Out of space. */
		return (calc);
	}

	/* What is the overhead of all these rwnd's (control data)? */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control-stuff, reduce it
	 * to 1, even if it is 0.  SWS (silly window syndrome) avoidance
	 * stays engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
136
137
138
139/*
140 * Build out our readq entry based on the incoming packet.
141 */
142struct sctp_queued_to_read *
143sctp_build_readq_entry(struct sctp_tcb *stcb,
144 struct sctp_nets *net,
145 uint32_t tsn, uint32_t ppid,
Michael Tuexen00657ac2016-12-07 21:53:26 +0100146 uint32_t context, uint16_t sid,
147 uint32_t mid, uint8_t flags,
tuexendd729232011-11-01 23:04:43 +0000148 struct mbuf *dm)
149{
150 struct sctp_queued_to_read *read_queue_e = NULL;
151
152 sctp_alloc_a_readq(stcb, read_queue_e);
153 if (read_queue_e == NULL) {
154 goto failed_build;
155 }
Michael Tuexene5001952016-04-17 19:25:27 +0200156 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
Michael Tuexen00657ac2016-12-07 21:53:26 +0100157 read_queue_e->sinfo_stream = sid;
tuexendd729232011-11-01 23:04:43 +0000158 read_queue_e->sinfo_flags = (flags << 8);
159 read_queue_e->sinfo_ppid = ppid;
tuexen9784e9a2011-12-18 13:04:23 +0000160 read_queue_e->sinfo_context = context;
tuexendd729232011-11-01 23:04:43 +0000161 read_queue_e->sinfo_tsn = tsn;
162 read_queue_e->sinfo_cumtsn = tsn;
163 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
Michael Tuexen00657ac2016-12-07 21:53:26 +0100164 read_queue_e->mid = mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200165 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
166 TAILQ_INIT(&read_queue_e->reasm);
tuexendd729232011-11-01 23:04:43 +0000167 read_queue_e->whoFrom = net;
tuexendd729232011-11-01 23:04:43 +0000168 atomic_add_int(&net->ref_count, 1);
169 read_queue_e->data = dm;
tuexendd729232011-11-01 23:04:43 +0000170 read_queue_e->stcb = stcb;
171 read_queue_e->port_from = stcb->rport;
Michael Tuexend98d2c42020-05-18 14:09:04 +0200172 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
173 read_queue_e->do_not_ref_stcb = 1;
174 }
tuexendd729232011-11-01 23:04:43 +0000175failed_build:
176 return (read_queue_e);
177}
178
tuexendd729232011-11-01 23:04:43 +0000179struct mbuf *
180sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
181{
182 struct sctp_extrcvinfo *seinfo;
183 struct sctp_sndrcvinfo *outinfo;
184 struct sctp_rcvinfo *rcvinfo;
185 struct sctp_nxtinfo *nxtinfo;
t00fcxen8d8ec792012-09-04 22:31:29 +0000186#if defined(__Userspace_os_Windows)
187 WSACMSGHDR *cmh;
188#else
tuexendd729232011-11-01 23:04:43 +0000189 struct cmsghdr *cmh;
t00fcxen8d8ec792012-09-04 22:31:29 +0000190#endif
tuexendd729232011-11-01 23:04:43 +0000191 struct mbuf *ret;
192 int len;
193 int use_extended;
194 int provide_nxt;
195
196 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
197 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
198 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
199 /* user does not want any ancillary data */
200 return (NULL);
201 }
202
203 len = 0;
204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
205 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
206 }
207 seinfo = (struct sctp_extrcvinfo *)sinfo;
208 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
Michael Tuexene8185522015-11-06 14:17:33 +0100209 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
tuexendd729232011-11-01 23:04:43 +0000210 provide_nxt = 1;
Michael Tuexen94656502015-11-06 23:14:32 +0100211 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
tuexendd729232011-11-01 23:04:43 +0000212 } else {
213 provide_nxt = 0;
214 }
215 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
216 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
217 use_extended = 1;
218 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
219 } else {
220 use_extended = 0;
221 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
222 }
223 } else {
224 use_extended = 0;
225 }
226
t00fcxen23c2b8f2012-12-10 20:15:50 +0000227 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
tuexendd729232011-11-01 23:04:43 +0000228 if (ret == NULL) {
229 /* No space */
230 return (ret);
231 }
232 SCTP_BUF_LEN(ret) = 0;
233
234 /* We need a CMSG header followed by the struct */
t00fcxen8d8ec792012-09-04 22:31:29 +0000235#if defined(__Userspace_os_Windows)
236 cmh = mtod(ret, WSACMSGHDR *);
237#else
tuexendd729232011-11-01 23:04:43 +0000238 cmh = mtod(ret, struct cmsghdr *);
t00fcxen8d8ec792012-09-04 22:31:29 +0000239#endif
t00fcxen6b2685d2014-07-11 06:33:20 +0000240 /*
241 * Make sure that there is no un-initialized padding between
242 * the cmsg header and cmsg data and after the cmsg data.
243 */
244 memset(cmh, 0, len);
tuexendd729232011-11-01 23:04:43 +0000245 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
246 cmh->cmsg_level = IPPROTO_SCTP;
247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
248 cmh->cmsg_type = SCTP_RCVINFO;
249 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
250 rcvinfo->rcv_sid = sinfo->sinfo_stream;
251 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
252 rcvinfo->rcv_flags = sinfo->sinfo_flags;
253 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
254 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
255 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
256 rcvinfo->rcv_context = sinfo->sinfo_context;
257 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
t00fcxen8d8ec792012-09-04 22:31:29 +0000258#if defined(__Userspace_os_Windows)
259 cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
260#else
tuexendd729232011-11-01 23:04:43 +0000261 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
t00fcxen8d8ec792012-09-04 22:31:29 +0000262#endif
tuexendd729232011-11-01 23:04:43 +0000263 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
264 }
265 if (provide_nxt) {
266 cmh->cmsg_level = IPPROTO_SCTP;
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
268 cmh->cmsg_type = SCTP_NXTINFO;
269 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
Michael Tuexene8185522015-11-06 14:17:33 +0100270 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
tuexendd729232011-11-01 23:04:43 +0000271 nxtinfo->nxt_flags = 0;
Michael Tuexene8185522015-11-06 14:17:33 +0100272 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
tuexendd729232011-11-01 23:04:43 +0000273 nxtinfo->nxt_flags |= SCTP_UNORDERED;
274 }
Michael Tuexene8185522015-11-06 14:17:33 +0100275 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
tuexendd729232011-11-01 23:04:43 +0000276 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
277 }
Michael Tuexene8185522015-11-06 14:17:33 +0100278 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
tuexendd729232011-11-01 23:04:43 +0000279 nxtinfo->nxt_flags |= SCTP_COMPLETE;
280 }
Michael Tuexene8185522015-11-06 14:17:33 +0100281 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
282 nxtinfo->nxt_length = seinfo->serinfo_next_length;
283 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
t00fcxen8d8ec792012-09-04 22:31:29 +0000284#if defined(__Userspace_os_Windows)
285 cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
286#else
tuexendd729232011-11-01 23:04:43 +0000287 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
t00fcxen8d8ec792012-09-04 22:31:29 +0000288#endif
tuexendd729232011-11-01 23:04:43 +0000289 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
290 }
291 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 cmh->cmsg_level = IPPROTO_SCTP;
293 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
294 if (use_extended) {
295 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
299 } else {
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 cmh->cmsg_type = SCTP_SNDRCV;
302 *outinfo = *sinfo;
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
304 }
305 }
306 return (ret);
307}
308
309
/*
 * Mark a received TSN as non-revokable: set its bit in the
 * nr_mapping_array and clear it from the (revokable) mapping_array,
 * then maintain the highest-TSN trackers for both maps.  Only needed
 * when draining (renege) is enabled; otherwise TSNs are never revoked
 * and the distinction does not matter.
 */
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		/* Draining disabled: nothing can be reneged, so skip. */
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't need
		 * to worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
		/* A received TSN must already be present in one of the maps. */
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	/* Set in the non-revokable map; clear from the revokable map. */
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is. */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			/* Revokable map is now empty below the base TSN. */
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
359
/*
 * Insert a read-queue entry into the correct per-stream input queue
 * (unordered queue or ordered queue), keeping the ordered queue sorted
 * by MID.
 *
 * Returns 0 on success, or -1 when the entry cannot be placed: either
 * a second unordered message arrives without I-DATA support, or an
 * entry with the same MID is already queued (duplicate).  The caller
 * aborts the association on -1.
 */
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one stream can be here in old style -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		/* Complete message in one chunk: no reassembly needed. */
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		/* Walk the queue to keep it sorted by MID. */
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * One in queue is bigger than the new
				 * one, insert before this one.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number?  Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert
					 * it after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
442
/*
 * Abort the association because of a protocol violation detected
 * during reassembly.  Builds a diagnostic string identifying the
 * offending chunk (format differs for I-DATA vs. DATA), frees the
 * chunk, sends an ABORT with a protocol-violation cause, and sets
 * *abort_flag so the caller stops processing.  'opspot' encodes the
 * call site for the diagnostic.
 */
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn,
		              (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	/* The offending chunk is consumed here. */
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}
478
479static void
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200480sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
Michael Tuexene5001952016-04-17 19:25:27 +0200481{
Michael Tuexend3331282020-02-03 23:14:00 +0100482 /*
Michael Tuexene5001952016-04-17 19:25:27 +0200483 * The control could not be placed and must be cleaned.
484 */
485 struct sctp_tmit_chunk *chk, *nchk;
486 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
487 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
488 if (chk->data)
489 sctp_m_freem(chk->data);
490 chk->data = NULL;
491 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
492 }
Michael Tuexenb07df882019-09-24 15:21:23 +0200493 sctp_free_remote_addr(control->whoFrom);
494 if (control->data) {
495 sctp_m_freem(control->data);
496 control->data = NULL;
497 }
498 sctp_free_a_readq(stcb, control);
tuexendd729232011-11-01 23:04:43 +0000499}
500
501/*
502 * Queue the chunk either right into the socket buffer if it is the next one
503 * to go OR put it in the correct place in the delivery queue. If we do
Michael Tuexene5001952016-04-17 19:25:27 +0200504 * append to the so_buf, keep doing so until we are out of order as
505 * long as the control's entered are non-fragmented.
tuexendd729232011-11-01 23:04:43 +0000506 */
/*
 * Deliver a complete, ordered message to the socket, or queue it in
 * its stream until its turn comes.
 *
 * If the message's MID is the next one deliverable on the stream, it
 * is pushed straight to the socket receive buffer, and the stream's
 * ordered queue is then drained of any further in-order, complete
 * messages.  Otherwise it is inserted into the stream queue via
 * sctp_place_control_in_stream().  On a protocol violation (MID
 * behind the delivery point, or a duplicate MID) the association is
 * aborted and *abort_flag is set.  *need_reasm is set when the next
 * in-order message is present but still fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe?  What happens when the ssn wraps?  If we are
	 * getting all the data in one stream this could happen quite
	 * rapidly.  One could use the TSN to keep track of things, but
	 * this scheme breaks down in the other type of stream usage
	 * that could occur.  Send a single msg to stream 0, send
	 * 4Billion messages to stream 1, now send a message to stream
	 * 0.  You have a situation where the TSN has wrapped but not
	 * in the stream.  Is this worth worrying about or should we
	 * just change our queue sort at the bottom to be by TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2
	 * and ssn 2 with TSN 1?  If the peer is doing some sort of
	 * funky TSN/SSN assignment this could happen... and I don't
	 * see how this would be a violation.  So for now I am
	 * undecided and will leave the sort by SSN alone.  Maybe a
	 * hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		        strm->last_mid_delivered, control->mid);
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			              strm->last_mid_delivered, control->sinfo_tsn,
			              control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			              (uint16_t)strm->last_mid_delivered,
			              control->sinfo_tsn,
			              control->sinfo_stream,
			              (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	queue_needed = 1;
	/* Tentatively charge the message to the stream queues. */
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: drop the TCB lock, take the socket
		 * lock, re-take the TCB lock; hold a refcnt so the TCB
		 * cannot go away in between.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* It won't be queued if it can be delivered directly. */
		queue_needed = 0;
		/* Undo the tentative charge made above. */
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		/*
		 * Now drain the stream queue of any further messages
		 * that have become deliverable in order.  NOTE:
		 * 'control' is reused as the loop cursor here.
		 */
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					      control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue.  And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				/* Next in order but still fragmented. */
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct
		 * place to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			              "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
676
Michael Tuexen3121b802016-04-10 23:28:19 +0200677
Michael Tuexene5001952016-04-17 19:25:27 +0200678static void
679sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
680{
681 struct mbuf *m, *prev = NULL;
682 struct sctp_tcb *stcb;
683
684 stcb = control->stcb;
685 control->held_length = 0;
686 control->length = 0;
687 m = control->data;
688 while (m) {
689 if (SCTP_BUF_LEN(m) == 0) {
690 /* Skip mbufs with NO length */
691 if (prev == NULL) {
692 /* First one */
693 control->data = sctp_m_free(m);
694 m = control->data;
695 } else {
696 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
697 m = SCTP_BUF_NEXT(prev);
698 }
699 if (m == NULL) {
700 control->tail_mbuf = prev;
701 }
702 continue;
Michael Tuexen3121b802016-04-10 23:28:19 +0200703 }
Michael Tuexene5001952016-04-17 19:25:27 +0200704 prev = m;
705 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
706 if (control->on_read_q) {
707 /*
708 * On read queue so we must increment the
709 * SB stuff, we assume caller has done any locks of SB.
710 */
711 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
Michael Tuexen3121b802016-04-10 23:28:19 +0200712 }
Michael Tuexene5001952016-04-17 19:25:27 +0200713 m = SCTP_BUF_NEXT(m);
Michael Tuexen3121b802016-04-10 23:28:19 +0200714 }
Michael Tuexene5001952016-04-17 19:25:27 +0200715 if (prev) {
716 control->tail_mbuf = prev;
717 }
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200718}
719
720static void
Michael Tuexenbe5e3e72017-07-19 14:44:48 +0200721sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200722{
Michael Tuexene5001952016-04-17 19:25:27 +0200723 struct mbuf *prev=NULL;
724 struct sctp_tcb *stcb;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200725
Michael Tuexene5001952016-04-17 19:25:27 +0200726 stcb = control->stcb;
727 if (stcb == NULL) {
Michael Tuexenf6d20c52016-04-18 11:31:05 +0200728#ifdef INVARIANTS
Michael Tuexene5001952016-04-17 19:25:27 +0200729 panic("Control broken");
Michael Tuexenf6d20c52016-04-18 11:31:05 +0200730#else
731 return;
732#endif
Michael Tuexene5001952016-04-17 19:25:27 +0200733 }
734 if (control->tail_mbuf == NULL) {
735 /* TSNH */
Michael Tuexenc38740e2019-10-06 10:52:55 +0200736 sctp_m_freem(control->data);
Michael Tuexene5001952016-04-17 19:25:27 +0200737 control->data = m;
738 sctp_setup_tail_pointer(control);
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200739 return;
740 }
Michael Tuexene5001952016-04-17 19:25:27 +0200741 control->tail_mbuf->m_next = m;
742 while (m) {
743 if (SCTP_BUF_LEN(m) == 0) {
744 /* Skip mbufs with NO length */
745 if (prev == NULL) {
746 /* First one */
747 control->tail_mbuf->m_next = sctp_m_free(m);
748 m = control->tail_mbuf->m_next;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200749 } else {
Michael Tuexene5001952016-04-17 19:25:27 +0200750 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
751 m = SCTP_BUF_NEXT(prev);
Michael Tuexen3121b802016-04-10 23:28:19 +0200752 }
Michael Tuexene5001952016-04-17 19:25:27 +0200753 if (m == NULL) {
754 control->tail_mbuf = prev;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200755 }
Michael Tuexene5001952016-04-17 19:25:27 +0200756 continue;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200757 }
Michael Tuexene5001952016-04-17 19:25:27 +0200758 prev = m;
759 if (control->on_read_q) {
760 /*
761 * On read queue so we must increment the
762 * SB stuff, we assume caller has done any locks of SB.
Michael Tuexen3121b802016-04-10 23:28:19 +0200763 */
Michael Tuexene5001952016-04-17 19:25:27 +0200764 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
765 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +0200766 *added += SCTP_BUF_LEN(m);
Michael Tuexene5001952016-04-17 19:25:27 +0200767 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
768 m = SCTP_BUF_NEXT(m);
769 }
770 if (prev) {
771 control->tail_mbuf = prev;
772 }
773}
774
Michael Tuexend3331282020-02-03 23:14:00 +0100775static void
Michael Tuexene5001952016-04-17 19:25:27 +0200776sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
777{
778 memset(nc, 0, sizeof(struct sctp_queued_to_read));
779 nc->sinfo_stream = control->sinfo_stream;
Michael Tuexen00657ac2016-12-07 21:53:26 +0100780 nc->mid = control->mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200781 TAILQ_INIT(&nc->reasm);
782 nc->top_fsn = control->top_fsn;
Michael Tuexen00657ac2016-12-07 21:53:26 +0100783 nc->mid = control->mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200784 nc->sinfo_flags = control->sinfo_flags;
785 nc->sinfo_ppid = control->sinfo_ppid;
786 nc->sinfo_context = control->sinfo_context;
787 nc->fsn_included = 0xffffffff;
788 nc->sinfo_tsn = control->sinfo_tsn;
789 nc->sinfo_cumtsn = control->sinfo_cumtsn;
790 nc->sinfo_assoc_id = control->sinfo_assoc_id;
791 nc->whoFrom = control->whoFrom;
792 atomic_add_int(&nc->whoFrom->ref_count, 1);
793 nc->stcb = control->stcb;
794 nc->port_from = control->port_from;
Michael Tuexend98d2c42020-05-18 14:09:04 +0200795 nc->do_not_ref_stcb = control->do_not_ref_stcb;
Michael Tuexene5001952016-04-17 19:25:27 +0200796}
797
Michael Tuexend3331282020-02-03 23:14:00 +0100798static void
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200799sctp_reset_a_control(struct sctp_queued_to_read *control,
800 struct sctp_inpcb *inp, uint32_t tsn)
801{
802 control->fsn_included = tsn;
803 if (control->on_read_q) {
Michael Tuexend3331282020-02-03 23:14:00 +0100804 /*
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200805 * We have to purge it from there,
806 * hopefully this will work :-)
807 */
808 TAILQ_REMOVE(&inp->read_queue, control, next);
809 control->on_read_q = 0;
810 }
811}
812
/*
 * Merge ready fragments into the first unordered control entry for a
 * pre-I-DATA ("old" DATA chunk) stream. Returns 1 when the caller should
 * stop looking at other unordered entries, 0 when a partial-delivery
 * (pd-api) run was started on this control.
 */
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/* Special handling for the old un-ordered data chunk.
	 * All the chunks/TSN's go to mid 0. So
	 * we have to do the old style watching to see
	 * if we have it all. If you return one, no other
	 * control entries on the un-ordered queue will
	 * be looked at. In theory there should be no others
	 * entries in reality, unless the guy is sending both
	 * unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	/* Next fragment we can merge is the one right after what we have. */
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			/*
			 * NOTE(review): this passes SCTP_READ_LOCK_NOT_HELD
			 * although inp_read_lock_held is available here —
			 * verify this is intentional when the caller already
			 * holds the INP read lock.
			 */
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left on
					 * the control queue to a new control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						/*
						 * Leftovers begin a new message:
						 * seed nc with its first fragment
						 * and fix the reasm accounting.
						 */
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Now lets add it to the queue after removing control */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					/* Message completed; close out the pd-api run. */
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				/* Message not yet complete; nc was never used. */
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		/* New bytes merged into an in-progress pd-api delivery. */
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		/* Enough queued to start partial delivery of this message. */
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
958
/*
 * Place a pre-I-DATA unordered fragment (chk) into control, keeping the
 * reassembly list sorted by FSN. FIRST_FRAG chunks may replace the data
 * already held in control when they carry a lower TSN; duplicates or
 * unrecoverable orderings abort the association via *abort_flag.
 */
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;
	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		        "chunk is a first fsn: %u becomes fsn_included\n",
		        chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is
			 * a smaller TSN than this one, even though
			 * this has a first, it must be from a subsequent
			 * msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble on
			 * one control multiple messages. As long
			 * as the next FIRST is greater than the old
			 * first (TSN i.e. FSN wise)
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/* Easy way the start of a new guy beyond the lowest */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does
				 * we started the pd-api on the higher TSN (since
				 * the equals part is a TSN failure it must be that).
				 *
				 * We are completely hosed in that case since I have
				 * no way to recover. This really will only happen
				 * if we can get more TSN's higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				                    abort_flag,
				                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got
			 * is smaller than the one we previously placed.. yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		/* First FIRST_FRAG ever seen: control takes over chk's data. */
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	/* Sorted insert of chk into the reassembly list (ascending FSN). */
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one, insert
			 * the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This
			 * really should not happen since the FSN is
			 * a TSN and it should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			                    abort_flag,
			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}

	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
1092
/*
 * Scan a stream's unordered and ordered reassembly queues and move every
 * deliverable message onto the socket's read queue, possibly starting a
 * partial-delivery (pd-api) run for a large incomplete message. Returns
 * the number of completed ordered messages handed to the read queue
 * (0 whenever a pd-api run blocks further progress).
 */
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of
	 * the SSN's on it that are fragmented
	 * are ready to deliver. If so go ahead
	 * and place them on the read queue. In
	 * so placing if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	/*
	 * pd_point: threshold above which an incomplete message is pushed
	 * to the reader via partial delivery.
	 */
	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		               stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	/* Pass 1: unordered queue (I-DATA format). */
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		        control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED ) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, control->end_added,
				                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, control->end_added,
				                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	/* Pass 2: ordered queue, starting with the head entry. */
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/* Ok the guy at the top was being partially delivered
		 * completed, so we remove it. Note
		 * the pd_api flag was taken off when the
		 * chunk was merged on in sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		        "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		        control, control->end_added, control->mid,
		        control->top_fsn, control->fsn_included,
		        strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED ) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				/* The partially delivered message is now whole. */
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, control->end_added,
				                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more must have gotten an un-ordered above being partially delivered. */
		return (0);
	}
deliver_more:
	/* Deliver in-order messages as long as the next expected MID is ready. */
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		        "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		        control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		        next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED ) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						      control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* A singleton now slipping through - mark it non-revokable too */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/* Check if we can defer adding until its all there */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/* Don't need it or cannot add more (one being delivered that way) */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					/*
					 * Starting a pd-api run: the bytes move
					 * to the read queue, so drop them from
					 * the stream accounting.
					 */
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, control->end_added,
				                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
1305
Michael Tuexenfdcf7902016-08-06 14:39:31 +02001306
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001307uint32_t
Michael Tuexene5001952016-04-17 19:25:27 +02001308sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1309 struct sctp_stream_in *strm,
1310 struct sctp_tcb *stcb, struct sctp_association *asoc,
Michael Tuexenfdcf7902016-08-06 14:39:31 +02001311 struct sctp_tmit_chunk *chk, int hold_rlock)
Michael Tuexene5001952016-04-17 19:25:27 +02001312{
1313 /*
1314 * Given a control and a chunk, merge the
1315 * data from the chk onto the control and free
1316 * up the chunk resources.
1317 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001318 uint32_t added=0;
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001319 int i_locked = 0;
Michael Tuexene5001952016-04-17 19:25:27 +02001320
Michael Tuexenfdcf7902016-08-06 14:39:31 +02001321 if (control->on_read_q && (hold_rlock == 0)) {
Michael Tuexene5001952016-04-17 19:25:27 +02001322 /*
1323 * Its being pd-api'd so we must
1324 * do some locks.
1325 */
1326 SCTP_INP_READ_LOCK(stcb->sctp_ep);
1327 i_locked = 1;
1328 }
1329 if (control->data == NULL) {
1330 control->data = chk->data;
1331 sctp_setup_tail_pointer(control);
1332 } else {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001333 sctp_add_to_tail_pointer(control, chk->data, &added);
Michael Tuexene5001952016-04-17 19:25:27 +02001334 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01001335 control->fsn_included = chk->rec.data.fsn;
Michael Tuexene5001952016-04-17 19:25:27 +02001336 asoc->size_on_reasm_queue -= chk->send_size;
1337 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
Michael Tuexen00657ac2016-12-07 21:53:26 +01001338 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001339 chk->data = NULL;
1340 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1341 control->first_frag_seen = 1;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001342 control->sinfo_tsn = chk->rec.data.tsn;
1343 control->sinfo_ppid = chk->rec.data.ppid;
Michael Tuexene5001952016-04-17 19:25:27 +02001344 }
1345 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1346 /* Its complete */
1347 if ((control->on_strm_q) && (control->on_read_q)) {
1348 if (control->pdapi_started) {
1349 control->pdapi_started = 0;
1350 strm->pd_api_started = 0;
1351 }
1352 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1353 /* Unordered */
1354 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1355 control->on_strm_q = 0;
1356 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1357 /* Ordered */
1358 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
Michael Tuexencdba1262017-11-05 13:05:10 +01001359 /*
1360 * Don't need to decrement size_on_all_streams,
1361 * since control is on the read queue.
1362 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001363 sctp_ucount_decr(asoc->cnt_on_all_streams);
Michael Tuexene5001952016-04-17 19:25:27 +02001364 control->on_strm_q = 0;
Michael Tuexenf6d20c52016-04-18 11:31:05 +02001365#ifdef INVARIANTS
Michael Tuexene5001952016-04-17 19:25:27 +02001366 } else if (control->on_strm_q) {
Michael Tuexeneccb4be2016-04-18 08:58:59 +02001367 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
Michael Tuexene5001952016-04-17 19:25:27 +02001368 control->on_strm_q);
Michael Tuexenf6d20c52016-04-18 11:31:05 +02001369#endif
Michael Tuexene5001952016-04-17 19:25:27 +02001370 }
1371 }
1372 control->end_added = 1;
1373 control->last_frag_seen = 1;
1374 }
1375 if (i_locked) {
1376 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1377 }
Michael Tuexen98b74552016-05-09 17:41:56 +02001378 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001379 return (added);
tuexendd729232011-11-01 23:04:43 +00001380}
1381
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
			  struct sctp_queued_to_read *control,
			  struct sctp_tmit_chunk *chk,
			  int created_control,
			  int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (unordered == 0) {
			/* Only ordered messages count on the all-streams counter. */
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/* Ok we created this control and now
			 * lets validate that its legal i.e. there
			 * is a B bit set, if not and we have
			 * up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		/* Old-style DATA, unordered: handled by its own path. */
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reasembly portion:
	 * o if its the first it goes to the control mbuf.
	 * o if its not first but the next in sequence it goes to the control,
	 *   and each succeeding one in order also goes.
	 * o if its not in order we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either
			 * sent us two data chunks with FIRST,
			 * or they sent two un-ordered chunks that
			 * were fragmented at the same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		/*
		 * Move the first fragment's data straight onto the control:
		 * it seeds sinfo_ppid/sinfo_tsn and fsn_included, then the
		 * chunk shell is freed.
		 */
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted=0;
		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"We have a new top_fsn: %u\n",
					chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"The last fsn is now in place fsn: %u\n",
					chk->rec.data.fsn);
				control->last_frag_seen = 1;
				/* The LAST fragment must also carry the highest FSN. */
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is not at top_fsn: %u -- abort\n",
						chk->rec.data.fsn,
						control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			/* Last fragment already seen: tighten the validity checks. */
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate last fsn: %u (top: %u) -- abort\n",
					chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
						chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/* validate not beyond top FSN if we have seen last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
					chk->rec.data.fsn,
					control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the
		 * new chunk in the reassembly for this
		 * control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a not first fsn: %u needs to be inserted\n",
			chk->rec.data.fsn);
		/* Insert in FSN order; the reasm list is kept sorted. */
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new one, insert
				 * the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Insert it before fsn: %u\n",
					at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/* Gak, He sent me a duplicate str seq number */
				/*
				 * foo bar, I guess I will just free this new guy,
				 * should we abort too? FIX ME MAYBE? Or it COULD be
				 * that the SSN's have wrapped. Maybe I should
				 * compare to TSN somehow... sigh for now just blow
				 * away the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate to fsn: %u -- abort\n",
					at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
				chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control
	 * structure that are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been
	 * seen there is no sense in looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		/* Pull consecutive fragments off the reasm list into control. */
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
					control, at,
					at->rec.data.fsn,
					next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the size-on-all-streams
					 * if its not on the read q. The read q
					 * flag will cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						/* Message complete: hand it to the reader. */
						sctp_add_to_readq(stcb->sctp_ep, stcb,
								  control,
								  &stcb->sctp_socket->so_rcv, control->end_added,
								  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				/* Gap in FSNs: nothing more can be merged now. */
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
1667
Michael Tuexene5001952016-04-17 19:25:27 +02001668static struct sctp_queued_to_read *
Michael Tuexen00657ac2016-12-07 21:53:26 +01001669sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
tuexendd729232011-11-01 23:04:43 +00001670{
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001671 struct sctp_queued_to_read *control;
1672
Michael Tuexene5001952016-04-17 19:25:27 +02001673 if (ordered) {
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001674 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001675 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
Michael Tuexene5001952016-04-17 19:25:27 +02001676 break;
tuexendd729232011-11-01 23:04:43 +00001677 }
Michael Tuexene5001952016-04-17 19:25:27 +02001678 }
1679 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001680 if (idata_supported) {
1681 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1682 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1683 break;
1684 }
tuexendd729232011-11-01 23:04:43 +00001685 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01001686 } else {
1687 control = TAILQ_FIRST(&strm->uno_inqueue);
tuexendd729232011-11-01 23:04:43 +00001688 }
1689 }
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001690 return (control);
tuexendd729232011-11-01 23:04:43 +00001691}
1692
tuexendd729232011-11-01 23:04:43 +00001693static int
1694sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
Michael Tuexene5001952016-04-17 19:25:27 +02001695 struct mbuf **m, int offset, int chk_length,
1696 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001697 int *break_flag, int last_chunk, uint8_t chk_type)
tuexendd729232011-11-01 23:04:43 +00001698{
Michael Tuexen022ef442018-05-21 17:04:54 +02001699 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001700 uint32_t tsn, fsn, gap, mid;
tuexendd729232011-11-01 23:04:43 +00001701 struct mbuf *dmbuf;
tuexen9784e9a2011-12-18 13:04:23 +00001702 int the_len;
tuexendd729232011-11-01 23:04:43 +00001703 int need_reasm_check = 0;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001704 uint16_t sid;
t00fcxen08f9ff92014-03-16 13:38:54 +00001705 struct mbuf *op_err;
1706 char msg[SCTP_DIAG_INFO_LEN];
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001707 struct sctp_queued_to_read *control, *ncontrol;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001708 uint32_t ppid;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001709 uint8_t chk_flags;
tuexendd729232011-11-01 23:04:43 +00001710 struct sctp_stream_reset_list *liste;
Michael Tuexene5001952016-04-17 19:25:27 +02001711 int ordered;
1712 size_t clen;
1713 int created_control = 0;
tuexendd729232011-11-01 23:04:43 +00001714
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001715 if (chk_type == SCTP_IDATA) {
1716 struct sctp_idata_chunk *chunk, chunk_buf;
1717
1718 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1719 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1720 chk_flags = chunk->ch.chunk_flags;
Michael Tuexene5001952016-04-17 19:25:27 +02001721 clen = sizeof(struct sctp_idata_chunk);
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001722 tsn = ntohl(chunk->dp.tsn);
1723 sid = ntohs(chunk->dp.sid);
1724 mid = ntohl(chunk->dp.mid);
1725 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02001726 fsn = 0;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001727 ppid = chunk->dp.ppid_fsn.ppid;
1728 } else {
1729 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1730 ppid = 0xffffffff; /* Use as an invalid value. */
1731 }
Michael Tuexene5001952016-04-17 19:25:27 +02001732 } else {
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001733 struct sctp_data_chunk *chunk, chunk_buf;
1734
1735 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1736 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1737 chk_flags = chunk->ch.chunk_flags;
Michael Tuexene5001952016-04-17 19:25:27 +02001738 clen = sizeof(struct sctp_data_chunk);
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001739 tsn = ntohl(chunk->dp.tsn);
1740 sid = ntohs(chunk->dp.sid);
1741 mid = (uint32_t)(ntohs(chunk->dp.ssn));
Michael Tuexene5001952016-04-17 19:25:27 +02001742 fsn = tsn;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001743 ppid = chunk->dp.ppid;
Michael Tuexene5001952016-04-17 19:25:27 +02001744 }
Michael Tuexene5001952016-04-17 19:25:27 +02001745 if ((size_t)chk_length == clen) {
1746 /*
1747 * Need to send an abort since we had a
1748 * empty data chunk.
1749 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001750 op_err = sctp_generate_no_user_data_cause(tsn);
Michael Tuexen91565952020-02-03 23:23:28 +01001751 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
Michael Tuexene5001952016-04-17 19:25:27 +02001752 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1753 *abort_flag = 1;
1754 return (0);
1755 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001756 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
tuexendd729232011-11-01 23:04:43 +00001757 asoc->send_sack = 1;
1758 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001759 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
tuexendd729232011-11-01 23:04:43 +00001760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1761 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1762 }
1763 if (stcb == NULL) {
1764 return (0);
1765 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001766 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
tuexendd729232011-11-01 23:04:43 +00001767 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1768 /* It is a duplicate */
1769 SCTP_STAT_INCR(sctps_recvdupdata);
1770 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1771 /* Record a dup for the next outbound sack */
1772 asoc->dup_tsns[asoc->numduptsns] = tsn;
1773 asoc->numduptsns++;
1774 }
1775 asoc->send_sack = 1;
1776 return (0);
1777 }
1778 /* Calculate the number of TSN's between the base and this TSN */
1779 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1780 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1781 /* Can't hold the bit in the mapping at max array, toss it */
1782 return (0);
1783 }
1784 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1785 SCTP_TCB_LOCK_ASSERT(stcb);
1786 if (sctp_expand_mapping_array(asoc, gap)) {
1787 /* Can't expand, drop it */
1788 return (0);
1789 }
1790 }
1791 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1792 *high_tsn = tsn;
1793 }
1794 /* See if we have received this one already */
1795 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1796 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1797 SCTP_STAT_INCR(sctps_recvdupdata);
1798 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1799 /* Record a dup for the next outbound sack */
1800 asoc->dup_tsns[asoc->numduptsns] = tsn;
1801 asoc->numduptsns++;
1802 }
1803 asoc->send_sack = 1;
1804 return (0);
1805 }
1806 /*
1807 * Check to see about the GONE flag, duplicates would cause a sack
1808 * to be sent up above
1809 */
1810 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1811 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
t00fcxen08f9ff92014-03-16 13:38:54 +00001812 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
tuexendd729232011-11-01 23:04:43 +00001813 /*
1814 * wait a minute, this guy is gone, there is no longer a
1815 * receiver. Send peer an ABORT!
1816 */
t00fcxen08f9ff92014-03-16 13:38:54 +00001817 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
tuexenda53ff02012-05-14 09:00:59 +00001818 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00001819 *abort_flag = 1;
1820 return (0);
1821 }
1822 /*
1823 * Now before going further we see if there is room. If NOT then we
1824 * MAY let one through only IF this TSN is the one we are waiting
1825 * for on a partial delivery API.
1826 */
1827
Michael Tuexene5001952016-04-17 19:25:27 +02001828 /* Is the stream valid? */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001829 if (sid >= asoc->streamincnt) {
Michael Tuexenf39c4292015-09-12 19:39:48 +02001830 struct sctp_error_invalid_stream *cause;
tuexendd729232011-11-01 23:04:43 +00001831
Michael Tuexenf39c4292015-09-12 19:39:48 +02001832 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1833 0, M_NOWAIT, 1, MT_DATA);
1834 if (op_err != NULL) {
tuexendd729232011-11-01 23:04:43 +00001835 /* add some space up front so prepend will work well */
Michael Tuexenf39c4292015-09-12 19:39:48 +02001836 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1837 cause = mtod(op_err, struct sctp_error_invalid_stream *);
tuexendd729232011-11-01 23:04:43 +00001838 /*
1839 * Error causes are just param's and this one has
1840 * two back to back phdr, one with the error type
1841 * and size, the other with the streamid and a rsvd
1842 */
Michael Tuexenf39c4292015-09-12 19:39:48 +02001843 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1844 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1845 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001846 cause->stream_id = htons(sid);
Michael Tuexenf39c4292015-09-12 19:39:48 +02001847 cause->reserved = htons(0);
1848 sctp_queue_op_err(stcb, op_err);
tuexendd729232011-11-01 23:04:43 +00001849 }
1850 SCTP_STAT_INCR(sctps_badsid);
1851 SCTP_TCB_LOCK_ASSERT(stcb);
1852 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1853 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1854 asoc->highest_tsn_inside_nr_map = tsn;
1855 }
1856 if (tsn == (asoc->cumulative_tsn + 1)) {
1857 /* Update cum-ack */
1858 asoc->cumulative_tsn = tsn;
1859 }
1860 return (0);
1861 }
1862 /*
Michael Tuexene5001952016-04-17 19:25:27 +02001863 * If its a fragmented message, lets see if we can
1864 * find the control on the reassembly queues.
tuexendd729232011-11-01 23:04:43 +00001865 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001866 if ((chk_type == SCTP_IDATA) &&
1867 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001868 (fsn == 0)) {
Michael Tuexend3331282020-02-03 23:14:00 +01001869 /*
1870 * The first *must* be fsn 0, and other
Michael Tuexene5001952016-04-17 19:25:27 +02001871 * (middle/end) pieces can *not* be fsn 0.
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001872 * XXX: This can happen in case of a wrap around.
1873 * Ignore is for now.
Michael Tuexene5001952016-04-17 19:25:27 +02001874 */
Michael Tuexenedd369d2020-05-19 09:42:15 +02001875 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
Michael Tuexene5001952016-04-17 19:25:27 +02001876 goto err_out;
1877 }
Michael Tuexene411f662016-12-17 23:36:21 +01001878 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001879 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001880 chk_flags, control);
1881 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02001882 /* See if we can find the re-assembly entity */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001883 if (control != NULL) {
Michael Tuexene5001952016-04-17 19:25:27 +02001884 /* We found something, does it belong? */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001885 if (ordered && (mid != control->mid)) {
Michael Tuexenedd369d2020-05-19 09:42:15 +02001886 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
Michael Tuexene5001952016-04-17 19:25:27 +02001887 err_out:
1888 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen91565952020-02-03 23:23:28 +01001889 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
Michael Tuexene5001952016-04-17 19:25:27 +02001890 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1891 *abort_flag = 1;
1892 return (0);
1893 }
1894 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1895 /* We can't have a switched order with an unordered chunk */
Michael Tuexenedd369d2020-05-19 09:42:15 +02001896 SCTP_SNPRINTF(msg, sizeof(msg),
1897 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1898 tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001899 goto err_out;
1900 }
1901 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1902 /* We can't have a switched unordered with a ordered chunk */
Michael Tuexenedd369d2020-05-19 09:42:15 +02001903 SCTP_SNPRINTF(msg, sizeof(msg),
Michael Tuexend98d2c42020-05-18 14:09:04 +02001904 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
Michael Tuexenedd369d2020-05-19 09:42:15 +02001905 tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001906 goto err_out;
1907 }
1908 }
1909 } else {
1910 /* Its a complete segment. Lets validate we
1911 * don't have a re-assembly going on with
1912 * the same Stream/Seq (for ordered) or in
1913 * the same Stream for unordered.
1914 */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001915 if (control != NULL) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001916 if (ordered || asoc->idata_supported) {
1917 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001918 chk_flags, mid);
Michael Tuexenedd369d2020-05-19 09:42:15 +02001919 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001920 goto err_out;
1921 } else {
1922 if ((tsn == control->fsn_included + 1) &&
1923 (control->end_added == 0)) {
Michael Tuexenedd369d2020-05-19 09:42:15 +02001924 SCTP_SNPRINTF(msg, sizeof(msg),
1925 "Illegal message sequence, missing end for MID: %8.8x",
1926 control->fsn_included);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001927 goto err_out;
1928 } else {
1929 control = NULL;
1930 }
1931 }
Michael Tuexene5001952016-04-17 19:25:27 +02001932 }
1933 }
1934 /* now do the tests */
1935 if (((asoc->cnt_on_all_streams +
1936 asoc->cnt_on_reasm_queue +
1937 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1938 (((int)asoc->my_rwnd) <= 0)) {
1939 /*
1940 * When we have NO room in the rwnd we check to make sure
1941 * the reader is doing its job...
1942 */
1943 if (stcb->sctp_socket->so_rcv.sb_cc) {
1944 /* some to read, wake-up */
1945#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1946 struct socket *so;
1947
1948 so = SCTP_INP_SO(stcb->sctp_ep);
1949 atomic_add_int(&stcb->asoc.refcnt, 1);
1950 SCTP_TCB_UNLOCK(stcb);
1951 SCTP_SOCKET_LOCK(so, 1);
1952 SCTP_TCB_LOCK(stcb);
1953 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1954 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1955 /* assoc was freed while we were unlocked */
1956 SCTP_SOCKET_UNLOCK(so, 1);
1957 return (0);
1958 }
1959#endif
1960 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1961#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1962 SCTP_SOCKET_UNLOCK(so, 1);
1963#endif
1964 }
1965 /* now is it in the mapping array of what we have accepted? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001966 if (chk_type == SCTP_DATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02001967 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1968 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1969 /* Nope not in the valid range dump it */
1970 dump_packet:
1971 sctp_set_rwnd(stcb, asoc);
1972 if ((asoc->cnt_on_all_streams +
1973 asoc->cnt_on_reasm_queue +
1974 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1975 SCTP_STAT_INCR(sctps_datadropchklmt);
1976 } else {
1977 SCTP_STAT_INCR(sctps_datadroprwnd);
1978 }
1979 *break_flag = 1;
1980 return (0);
1981 }
1982 } else {
1983 if (control == NULL) {
1984 goto dump_packet;
1985 }
1986 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1987 goto dump_packet;
1988 }
1989 }
1990 }
tuexendd729232011-11-01 23:04:43 +00001991#ifdef SCTP_ASOCLOG_OF_TSNS
1992 SCTP_TCB_LOCK_ASSERT(stcb);
1993 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1994 asoc->tsn_in_at = 0;
1995 asoc->tsn_in_wrapped = 1;
1996 }
1997 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001998 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1999 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
tuexendd729232011-11-01 23:04:43 +00002000 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2001 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2002 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2003 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2004 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2005 asoc->tsn_in_at++;
2006#endif
Michael Tuexene5001952016-04-17 19:25:27 +02002007 /*
2008 * Before we continue lets validate that we are not being fooled by
2009 * an evil attacker. We can only have Nk chunks based on our TSN
2010 * spread allowed by the mapping array N * 8 bits, so there is no
2011 * way our stream sequence numbers could have wrapped. We of course
2012 * only validate the FIRST fragment so the bit must be set.
2013 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002014 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
tuexendd729232011-11-01 23:04:43 +00002015 (TAILQ_EMPTY(&asoc->resetHead)) &&
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002016 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01002017 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
tuexendd729232011-11-01 23:04:43 +00002018 /* The incoming sseq is behind where we last delivered? */
Michael Tuexeneccb4be2016-04-18 08:58:59 +02002019 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01002020 mid, asoc->strmin[sid].last_mid_delivered);
tuexendd729232011-11-01 23:04:43 +00002021
Michael Tuexen00657ac2016-12-07 21:53:26 +01002022 if (asoc->idata_supported) {
Michael Tuexenedd369d2020-05-19 09:42:15 +02002023 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2024 asoc->strmin[sid].last_mid_delivered,
2025 tsn,
2026 sid,
2027 mid);
Michael Tuexen00657ac2016-12-07 21:53:26 +01002028 } else {
Michael Tuexenedd369d2020-05-19 09:42:15 +02002029 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2030 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2031 tsn,
2032 sid,
2033 (uint16_t)mid);
Michael Tuexen00657ac2016-12-07 21:53:26 +01002034 }
t00fcxen08f9ff92014-03-16 13:38:54 +00002035 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen91565952020-02-03 23:23:28 +01002036 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
t00fcxen08f9ff92014-03-16 13:38:54 +00002037 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00002038 *abort_flag = 1;
2039 return (0);
2040 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002041 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002042 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2043 } else {
2044 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2045 }
tuexendd729232011-11-01 23:04:43 +00002046 if (last_chunk == 0) {
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002047 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002048 dmbuf = SCTP_M_COPYM(*m,
2049 (offset + sizeof(struct sctp_idata_chunk)),
2050 the_len, M_NOWAIT);
2051 } else {
2052 dmbuf = SCTP_M_COPYM(*m,
2053 (offset + sizeof(struct sctp_data_chunk)),
2054 the_len, M_NOWAIT);
2055 }
tuexendd729232011-11-01 23:04:43 +00002056#ifdef SCTP_MBUF_LOGGING
2057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
t00fcxen8285bce2015-01-10 21:09:55 +00002058 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
tuexendd729232011-11-01 23:04:43 +00002059 }
2060#endif
2061 } else {
2062 /* We can steal the last chunk */
2063 int l_len;
2064 dmbuf = *m;
2065 /* lop off the top part */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002066 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002067 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2068 } else {
2069 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2070 }
tuexendd729232011-11-01 23:04:43 +00002071 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2072 l_len = SCTP_BUF_LEN(dmbuf);
2073 } else {
2074 /* need to count up the size hopefully
2075 * does not hit this to often :-0
2076 */
2077 struct mbuf *lat;
tuexen63fc0bb2011-12-27 12:24:52 +00002078
tuexendd729232011-11-01 23:04:43 +00002079 l_len = 0;
tuexen63fc0bb2011-12-27 12:24:52 +00002080 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
tuexendd729232011-11-01 23:04:43 +00002081 l_len += SCTP_BUF_LEN(lat);
tuexendd729232011-11-01 23:04:43 +00002082 }
2083 }
2084 if (l_len > the_len) {
2085 /* Trim the end round bytes off too */
2086 m_adj(dmbuf, -(l_len - the_len));
2087 }
2088 }
2089 if (dmbuf == NULL) {
2090 SCTP_STAT_INCR(sctps_nomem);
2091 return (0);
2092 }
Michael Tuexene5001952016-04-17 19:25:27 +02002093 /*
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002094 * Now no matter what, we need a control, get one
Michael Tuexene5001952016-04-17 19:25:27 +02002095 * if we don't have one (we may have gotten it
2096 * above when we found the message was fragmented
2097 */
2098 if (control == NULL) {
2099 sctp_alloc_a_readq(stcb, control);
2100 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002101 ppid,
2102 sid,
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002103 chk_flags,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002104 NULL, fsn, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002105 if (control == NULL) {
2106 SCTP_STAT_INCR(sctps_nomem);
2107 return (0);
2108 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002109 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
Michael Tuexen3381e772017-07-19 17:14:51 +02002110 struct mbuf *mm;
2111
Michael Tuexene5001952016-04-17 19:25:27 +02002112 control->data = dmbuf;
Michael Tuexenc38740e2019-10-06 10:52:55 +02002113 control->tail_mbuf = NULL;
Michael Tuexen3381e772017-07-19 17:14:51 +02002114 for (mm = control->data; mm; mm = mm->m_next) {
2115 control->length += SCTP_BUF_LEN(mm);
Michael Tuexenc38740e2019-10-06 10:52:55 +02002116 if (SCTP_BUF_NEXT(mm) == NULL) {
2117 control->tail_mbuf = mm;
2118 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002119 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002120 control->end_added = 1;
2121 control->last_frag_seen = 1;
2122 control->first_frag_seen = 1;
2123 control->fsn_included = fsn;
2124 control->top_fsn = fsn;
Michael Tuexene5001952016-04-17 19:25:27 +02002125 }
2126 created_control = 1;
2127 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002128 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002129 chk_flags, ordered, mid, control);
2130 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
tuexendd729232011-11-01 23:04:43 +00002131 TAILQ_EMPTY(&asoc->resetHead) &&
2132 ((ordered == 0) ||
Michael Tuexen00657ac2016-12-07 21:53:26 +01002133 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2134 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
tuexendd729232011-11-01 23:04:43 +00002135 /* Candidate for express delivery */
2136 /*
2137 * Its not fragmented, No PD-API is up, Nothing in the
2138 * delivery queue, Its un-ordered OR ordered and the next to
2139 * deliver AND nothing else is stuck on the stream queue,
2140 * And there is room for it in the socket buffer. Lets just
2141 * stuff it up the buffer....
2142 */
tuexendd729232011-11-01 23:04:43 +00002143 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2144 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2145 asoc->highest_tsn_inside_nr_map = tsn;
2146 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002147 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2148 control, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002149
tuexendd729232011-11-01 23:04:43 +00002150 sctp_add_to_readq(stcb->sctp_ep, stcb,
2151 control, &stcb->sctp_socket->so_rcv,
2152 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2153
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002154 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
tuexendd729232011-11-01 23:04:43 +00002155 /* for ordered, bump what we delivered */
Michael Tuexene411f662016-12-17 23:36:21 +01002156 asoc->strmin[sid].last_mid_delivered++;
tuexendd729232011-11-01 23:04:43 +00002157 }
2158 SCTP_STAT_INCR(sctps_recvexpress);
2159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002160 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
tuexendd729232011-11-01 23:04:43 +00002161 SCTP_STR_LOG_FROM_EXPRS_DEL);
2162 }
2163 control = NULL;
tuexendd729232011-11-01 23:04:43 +00002164 goto finish_express_del;
2165 }
tuexen63fc0bb2011-12-27 12:24:52 +00002166
Michael Tuexene5001952016-04-17 19:25:27 +02002167 /* Now will we need a chunk too? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002168 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
tuexendd729232011-11-01 23:04:43 +00002169 sctp_alloc_a_chunk(stcb, chk);
2170 if (chk == NULL) {
2171 /* No memory so we drop the chunk */
2172 SCTP_STAT_INCR(sctps_nomem);
2173 if (last_chunk == 0) {
2174 /* we copied it, free the copy */
2175 sctp_m_freem(dmbuf);
2176 }
2177 return (0);
2178 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002179 chk->rec.data.tsn = tsn;
tuexendd729232011-11-01 23:04:43 +00002180 chk->no_fr_allowed = 0;
Michael Tuexen00657ac2016-12-07 21:53:26 +01002181 chk->rec.data.fsn = fsn;
2182 chk->rec.data.mid = mid;
2183 chk->rec.data.sid = sid;
2184 chk->rec.data.ppid = ppid;
tuexendd729232011-11-01 23:04:43 +00002185 chk->rec.data.context = stcb->asoc.context;
2186 chk->rec.data.doing_fast_retransmit = 0;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002187 chk->rec.data.rcv_flags = chk_flags;
tuexendd729232011-11-01 23:04:43 +00002188 chk->asoc = asoc;
2189 chk->send_size = the_len;
2190 chk->whoTo = net;
Michael Tuexen00657ac2016-12-07 21:53:26 +01002191 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
Michael Tuexene5001952016-04-17 19:25:27 +02002192 chk,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002193 control, mid);
tuexendd729232011-11-01 23:04:43 +00002194 atomic_add_int(&net->ref_count, 1);
2195 chk->data = dmbuf;
Michael Tuexen3121b802016-04-10 23:28:19 +02002196 }
Michael Tuexene5001952016-04-17 19:25:27 +02002197 /* Set the appropriate TSN mark */
2198 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2199 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2200 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2201 asoc->highest_tsn_inside_nr_map = tsn;
2202 }
2203 } else {
2204 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2205 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2206 asoc->highest_tsn_inside_map = tsn;
2207 }
2208 }
2209 /* Now is it complete (i.e. not fragmented)? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002210 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02002211 /*
2212 * Special check for when streams are resetting. We
2213 * could be more smart about this and check the
2214 * actual stream to see if it is not being reset..
2215 * that way we would not create a HOLB when amongst
2216 * streams being reset and those not being reset.
2217 *
2218 */
2219 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2220 SCTP_TSN_GT(tsn, liste->tsn)) {
Michael Tuexen3121b802016-04-10 23:28:19 +02002221 /*
Michael Tuexene5001952016-04-17 19:25:27 +02002222 * yep its past where we need to reset... go
2223 * ahead and queue it.
Michael Tuexen3121b802016-04-10 23:28:19 +02002224 */
Michael Tuexene5001952016-04-17 19:25:27 +02002225 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2226 /* first one on */
2227 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2228 } else {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002229 struct sctp_queued_to_read *lcontrol, *nlcontrol;
Michael Tuexene5001952016-04-17 19:25:27 +02002230 unsigned char inserted = 0;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002231 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2232 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
Michael Tuexen3121b802016-04-10 23:28:19 +02002233
Michael Tuexene5001952016-04-17 19:25:27 +02002234 continue;
2235 } else {
2236 /* found it */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002237 TAILQ_INSERT_BEFORE(lcontrol, control, next);
Michael Tuexene5001952016-04-17 19:25:27 +02002238 inserted = 1;
2239 break;
2240 }
Michael Tuexen3121b802016-04-10 23:28:19 +02002241 }
Michael Tuexene5001952016-04-17 19:25:27 +02002242 if (inserted == 0) {
2243 /*
2244 * must be put at end, use
2245 * prevP (all setup from
2246 * loop) to setup nextP.
2247 */
2248 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2249 }
2250 }
2251 goto finish_express_del;
2252 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002253 if (chk_flags & SCTP_DATA_UNORDERED) {
Michael Tuexene5001952016-04-17 19:25:27 +02002254 /* queue directly into socket buffer */
Michael Tuexen00657ac2016-12-07 21:53:26 +01002255 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2256 control, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002257 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2258 sctp_add_to_readq(stcb->sctp_ep, stcb,
2259 control,
2260 &stcb->sctp_socket->so_rcv, 1,
2261 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2262
2263 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002264 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2265 mid);
Michael Tuexene411f662016-12-17 23:36:21 +01002266 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
Michael Tuexene5001952016-04-17 19:25:27 +02002267 if (*abort_flag) {
t00fcxen2ed8f3d2014-04-23 21:28:37 +00002268 if (last_chunk) {
2269 *m = NULL;
2270 }
tuexendd729232011-11-01 23:04:43 +00002271 return (0);
tuexendd729232011-11-01 23:04:43 +00002272 }
2273 }
Michael Tuexene5001952016-04-17 19:25:27 +02002274 goto finish_express_del;
2275 }
2276 /* If we reach here its a reassembly */
2277 need_reasm_check = 1;
2278 SCTPDBG(SCTP_DEBUG_XXX,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002279 "Queue data to stream for reasm control: %p MID: %u\n",
2280 control, mid);
Michael Tuexene411f662016-12-17 23:36:21 +01002281 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02002282 if (*abort_flag) {
2283 /*
2284 * the assoc is now gone and chk was put onto the
2285 * reasm queue, which has all been freed.
2286 */
2287 if (last_chunk) {
2288 *m = NULL;
tuexendd729232011-11-01 23:04:43 +00002289 }
Michael Tuexene5001952016-04-17 19:25:27 +02002290 return (0);
tuexendd729232011-11-01 23:04:43 +00002291 }
2292finish_express_del:
Michael Tuexene5001952016-04-17 19:25:27 +02002293 /* Here we tidy up things */
tuexen15f99d82012-04-19 16:08:38 +00002294 if (tsn == (asoc->cumulative_tsn + 1)) {
2295 /* Update cum-ack */
2296 asoc->cumulative_tsn = tsn;
tuexendd729232011-11-01 23:04:43 +00002297 }
2298 if (last_chunk) {
2299 *m = NULL;
2300 }
2301 if (ordered) {
2302 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2303 } else {
2304 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2305 }
2306 SCTP_STAT_INCR(sctps_recvdata);
2307 /* Set it present please */
2308 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002309 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
tuexendd729232011-11-01 23:04:43 +00002310 }
2311 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2312 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2313 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2314 }
Michael Tuexene411f662016-12-17 23:36:21 +01002315 if (need_reasm_check) {
2316 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2317 need_reasm_check = 0;
2318 }
tuexendd729232011-11-01 23:04:43 +00002319 /* check the special flag for stream resets */
2320 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2321 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2322 /*
2323 * we have finished working through the backlogged TSN's now
2324 * time to reset streams. 1: call reset function. 2: free
2325 * pending_reply space 3: distribute any chunks in
2326 * pending_reply_queue.
2327 */
t00fcxen0f0d87f2012-09-07 13:42:20 +00002328 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
tuexendd729232011-11-01 23:04:43 +00002329 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
Michael Tüxen6b4d2922015-07-22 13:55:48 +02002330 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
tuexendd729232011-11-01 23:04:43 +00002331 SCTP_FREE(liste, SCTP_M_STRESET);
2332 /*sa_ignore FREED_MEMORY*/
2333 liste = TAILQ_FIRST(&asoc->resetHead);
2334 if (TAILQ_EMPTY(&asoc->resetHead)) {
2335 /* All can be removed */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002336 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2337 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2338 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
tuexendd729232011-11-01 23:04:43 +00002339 if (*abort_flag) {
tuexen63fc0bb2011-12-27 12:24:52 +00002340 return (0);
tuexendd729232011-11-01 23:04:43 +00002341 }
Michael Tuexene411f662016-12-17 23:36:21 +01002342 if (need_reasm_check) {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002343 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
Michael Tuexene411f662016-12-17 23:36:21 +01002344 need_reasm_check = 0;
2345 }
tuexendd729232011-11-01 23:04:43 +00002346 }
2347 } else {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002348 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2349 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
tuexendd729232011-11-01 23:04:43 +00002350 break;
2351 }
2352 /*
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002353 * if control->sinfo_tsn is <= liste->tsn we can
tuexendd729232011-11-01 23:04:43 +00002354 * process it which is the NOT of
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002355 * control->sinfo_tsn > liste->tsn
tuexendd729232011-11-01 23:04:43 +00002356 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002357 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2358 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
tuexendd729232011-11-01 23:04:43 +00002359 if (*abort_flag) {
tuexen63fc0bb2011-12-27 12:24:52 +00002360 return (0);
tuexendd729232011-11-01 23:04:43 +00002361 }
Michael Tuexene411f662016-12-17 23:36:21 +01002362 if (need_reasm_check) {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002363 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
Michael Tuexene411f662016-12-17 23:36:21 +01002364 need_reasm_check = 0;
2365 }
tuexendd729232011-11-01 23:04:43 +00002366 }
2367 }
tuexendd729232011-11-01 23:04:43 +00002368 }
2369 return (1);
2370}
2371
Michael Tuexen81055222016-03-23 14:40:10 +01002372static const int8_t sctp_map_lookup_tab[256] = {
tuexendd729232011-11-01 23:04:43 +00002373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 4,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 5,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 4,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 6,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 4,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 5,
2385 0, 1, 0, 2, 0, 1, 0, 3,
2386 0, 1, 0, 2, 0, 1, 0, 4,
2387 0, 1, 0, 2, 0, 1, 0, 3,
2388 0, 1, 0, 2, 0, 1, 0, 7,
2389 0, 1, 0, 2, 0, 1, 0, 3,
2390 0, 1, 0, 2, 0, 1, 0, 4,
2391 0, 1, 0, 2, 0, 1, 0, 3,
2392 0, 1, 0, 2, 0, 1, 0, 5,
2393 0, 1, 0, 2, 0, 1, 0, 3,
2394 0, 1, 0, 2, 0, 1, 0, 4,
2395 0, 1, 0, 2, 0, 1, 0, 3,
2396 0, 1, 0, 2, 0, 1, 0, 6,
2397 0, 1, 0, 2, 0, 1, 0, 3,
2398 0, 1, 0, 2, 0, 1, 0, 4,
2399 0, 1, 0, 2, 0, 1, 0, 3,
2400 0, 1, 0, 2, 0, 1, 0, 5,
2401 0, 1, 0, 2, 0, 1, 0, 3,
2402 0, 1, 0, 2, 0, 1, 0, 4,
2403 0, 1, 0, 2, 0, 1, 0, 3,
2404 0, 1, 0, 2, 0, 1, 0, 8
2405};
2406
2407
/*
 * Recompute the cumulative TSN from the mapping arrays and, when enough
 * leading TSNs have been acknowledged, slide (or clear) the mapping_array
 * and nr_mapping_array windows forward so mapping_array_base_tsn advances.
 * Called with the TCB locked (all callers in this file hold it --
 * NOTE(review): confirmed only by convention visible in this file).
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think
	 * that all entries that make up the position
	 * of the cum-ack would be in the nr-mapping array
	 * only.. i.e. things up to the cum-ack are always
	 * deliverable. That's true with one exception: when
	 * it's a fragmented message we may not deliver the data
	 * until some threshold (or all of it) is in place. So
	 * we must OR the nr_mapping_array and mapping_array to
	 * get a true picture of the cum-ack.
	 */
	struct sctp_association *asoc;
	int at;
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	/* Snapshot the pre-slide state for the map-logging calls below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	/*
	 * Count, in 'at', the number of TSNs contiguously present from
	 * mapping_array_base_tsn: whole bytes of 0xff contribute 8, the
	 * first non-full byte contributes its trailing-ones count via
	 * sctp_map_lookup_tab.
	 */
	at = 0;
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	/* New cum-ack: last TSN of the contiguous run (base - 1 when at == 0). */
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
		/* Internal inconsistency: cum-ack beyond both recorded highs. */
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		/* Repair by clamping the recorded highs to the cum-ack. */
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	/* highest_tsn = max of the renegable and non-renegable highs. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;
#ifdef INVARIANTS
		unsigned int i;
#endif

		/* clear the array (only the bytes the run covered) */
		clr = ((at+7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		/* Debug build: verify nothing survived the clear. */
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		/* Restart the window right after the cum-ack. */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);
		if (slide_end < slide_from) {
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
				    lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
				    asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		/* Number of bytes that must remain valid after the slide. */
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
				     SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
					     (uint32_t) asoc->mapping_array_size,
					     SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Copy the still-live bytes down to the front... */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];

			}
			/* ...and zero the vacated tail. */
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * If a "highest" marker sat exactly one TSN before the
			 * old base (i.e. nothing tracked in that map), keep it
			 * one before the new base as well.
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			/* Advance the base by the slid-off whole bytes (8 TSNs each). */
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
					     SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
2576
/*
 * Decide whether a SACK must be sent immediately or whether the delayed-ack
 * timer should (re)run, after first sliding the mapping arrays forward.
 * 'was_a_gap' tells us whether a gap existed before the newly processed
 * data; a gap closing is itself a reason to SACK at once.  In the
 * SHUTDOWN-SENT state a SHUTDOWN is (re)sent instead of arming the timer.
 */
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;
	int is_a_gap;

	sctp_slide_mapping_arrays(stcb);
	asoc = &stcb->asoc;
	/* Highest TSN seen is the max over both mapping arrays. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	/* Is there a gap now? */
	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok special case, in SHUTDOWN-SENT case. Here we
		 * make sure the SACK timer is off and instead send a
		 * SHUTDOWN and, if there is a gap, a SACK.
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
					stcb->sctp_ep, stcb, NULL,
					SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
		}
		/* Prefer the alternate path for the SHUTDOWN when one is set. */
		sctp_send_shutdown(stcb,
				   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		if (is_a_gap) {
			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		}
	} else {
		/*
		 * CMT DAC algorithm: increase number of packets
		 * received since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		/* Any of these conditions forces an immediate SACK decision. */
		if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */
		    ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
							 * longer is one */
		    (stcb->asoc.numduptsns) || /* we have dup's */
		    (is_a_gap) || /* is still a gap */
		    (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
		    ) {

			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {

				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of reordering.
				 * Therefore, acks that do not have to be
				 * sent because of the above reasons will
				 * be delayed. That is, acks that would
				 * have been sent due to gap reports will
				 * be delayed with DAC. Start the delayed
				 * ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
						 stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok we must build a SACK since the
				 * timer is pending, we got our
				 * first packet OR there are gaps or
				 * duplicates.
				 */
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Nothing urgent: just make sure the delayed-ack timer runs. */
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
						 stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}
2670
/*
 * Walk all chunks of a received packet that is expected to carry DATA.
 *
 * Starting at *offset into the mbuf chain *mm, each chunk header is read
 * in turn; DATA/I-DATA chunks are handed to sctp_process_a_data_chunk(),
 * while any other known chunk type appearing after data triggers an
 * association abort.  Unknown chunk types are handled per the RFC 4960
 * highest-order-bit rules (0x40: report, 0x80: skip vs. stop).
 *
 * Parameters:
 *   mm       - in/out: mbuf chain holding the packet (may be replaced by a
 *              smaller mbuf, see the cluster-starvation copy below).
 *   iphlen   - IP header length, only used when reporting rwnd-overrun drops.
 *   offset   - in/out: byte offset of the first chunk; advanced past each
 *              processed chunk.
 *   length   - total packet length in bytes.
 *   inp/stcb/net - endpoint, association (TCB lock held), and source net.
 *   high_tsn - out: highest TSN delivered by sctp_process_a_data_chunk().
 *
 * Returns:
 *   0 - normal completion (SACK machinery kicked via sctp_sack_check()).
 *   1 - the first chunk header could not be read.
 *   2 - the association was aborted (protocol violation or abort_flag).
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t *high_tsn)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int stop_proc = 0;
	int break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;
	uint16_t chk_length;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	/*
	 * Remember whether a gap existed BEFORE this packet was processed;
	 * sctp_sack_check() at the bottom needs the before/after contrast.
	 */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * setup where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net. This is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it. If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf. This will help
	 * with cluster starvation.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		if (m) {
			/* ok lets see if we can copy the data up */
			caddr_t *from, *to;

			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world .. yikes */
			m = *mm;
		}
	}
	/* get pointer to the first chunk header */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
	    sizeof(struct sctp_chunkhdr),
	    (uint8_t *)&chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		/*
		 * DATA when I-DATA was negotiated (or vice versa) is a
		 * protocol violation: abort the association.
		 */
		if ((asoc->idata_supported == 1) &&
		    (ch->chunk_type == SCTP_DATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((asoc->idata_supported == 0) &&
		    (ch->chunk_type == SCTP_IDATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((ch->chunk_type == SCTP_DATA) ||
		    (ch->chunk_type == SCTP_IDATA)) {
			uint16_t clen;

			/* DATA and I-DATA have different fixed header sizes. */
			if (ch->chunk_type == SCTP_DATA) {
				clen = sizeof(struct sctp_data_chunk);
			} else {
				clen = sizeof(struct sctp_idata_chunk);
			}
			if (chk_length < clen) {
				/*
				 * Need to send an abort since we had an
				 * invalid data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
				    chk_length);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk, ch->chunk_type)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				continue;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				{
					/*
					 * Now, what do we do with KNOWN chunks that
					 * are NOT in the right place?
					 *
					 * For now, I do nothing but ignore them. We
					 * may later want to add sysctl stuff to
					 * switch out and do either an ABORT() or
					 * possibly process them.
					 */
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
					    ch->chunk_type);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
					return (2);
				}
			default:
				/*
				 * Unknown chunk type: use bit rules after
				 * checking length
				 */
				if (chk_length < sizeof(struct sctp_chunkhdr)) {
					/*
					 * Need to send an abort since we had an
					 * invalid chunk.
					 */
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
					return (2);
				}
				if (ch->chunk_type & 0x40) {
					/* Add an error report to the queue */
					struct mbuf *op_err;
					struct sctp_gen_error_cause *cause;

					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
					    0, M_NOWAIT, 1, MT_DATA);
					if (op_err != NULL) {
						cause = mtod(op_err, struct sctp_gen_error_cause *);
						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
						/* Append a copy of the offending chunk. */
						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
						if (SCTP_BUF_NEXT(op_err) != NULL) {
							sctp_queue_op_err(stcb, op_err);
						} else {
							sctp_m_freem(op_err);
						}
					}
				}
				if ((ch->chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			}	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr),
		    (uint8_t *)&chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			continue;
		}
	}
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * Did we get data, if so update the time for auto-close and
		 * give peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INDATA,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassm queue if needed */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
	return (0);
}
2960
/*
 * Process one Gap Ack Block [frag_strt, frag_end] (offsets relative to
 * last_tsn) against the sent queue, marking the covered chunks as acked.
 *
 * Parameters:
 *   stcb      - association; its sent_queue is scanned.
 *   p_tp1     - in/out cursor into the sent queue, so successive blocks of
 *               the same SACK resume where the previous one stopped.
 *   last_tsn  - the SACK's cumulative TSN ack; block offsets are added to it.
 *   nr_sacking - non-zero for non-revocable (NR-SACK) blocks: chunk data is
 *               freed and the chunk moves to SCTP_DATAGRAM_NR_ACKED.
 *   num_frs   - out: incremented for each chunk in fast-retransmit.
 *   biggest_newly_acked_tsn, this_sack_lowest_newack - out: trackers used
 *               by the caller's HTNA/CMT-DAC logic.
 *   rto_ok    - in/out: cleared after the first RTT measurement is taken so
 *               only one sample per SACK updates the RTO.
 *
 * Returns non-zero if at least one chunk's data was freed (the caller uses
 * this to decide whether to wake the sending socket); only meaningful for
 * nr_sacking scans.
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t *biggest_newly_acked_tsn,
    uint32_t *this_sack_lowest_newack,
    int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.tsn == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.tsn;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.tsn,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.tsn,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uint32_t)(uintptr_t)tp1->whoTo,
							    tp1->rec.data.tsn);
						}
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							    tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?
							 */
							if (tp1->do_rtt) {
								/* Only the first valid sample per SACK updates the RTO. */
								if (*rto_ok &&
								    sctp_calculate_rto(stcb,
								    &stcb->asoc,
								    tp1->whoTo,
								    &tp1->sent_rcv_time,
								    SCTP_RTT_FROM_DATA)) {
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}

					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
							    tp1->rec.data.tsn;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
						}
						/* A drained stream with a pending reset can now trigger it. */
						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
							stcb->asoc.trigger_reset = 1;
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/* sa_ignore NO_NULL_CHK */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			}	/* if (tp1->tsn == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
				/* Past the target TSN; the gap block skipped it. */
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			if ((tp1 == NULL) && (circled == 0)) {
				/* Wrap around once in case blocks were out of order. */
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		}	/* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	}	/* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}
3196
3197
3198static int
3199sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3200 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3201 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
tuexen9784e9a2011-12-18 13:04:23 +00003202 int num_seg, int num_nr_seg, int *rto_ok)
tuexendd729232011-11-01 23:04:43 +00003203{
3204 struct sctp_gap_ack_block *frag, block;
3205 struct sctp_tmit_chunk *tp1;
3206 int i;
3207 int num_frs = 0;
3208 int chunk_freed;
3209 int non_revocable;
3210 uint16_t frag_strt, frag_end, prev_frag_end;
3211
3212 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3213 prev_frag_end = 0;
3214 chunk_freed = 0;
3215
3216 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3217 if (i == num_seg) {
3218 prev_frag_end = 0;
3219 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3220 }
3221 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3222 sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3223 *offset += sizeof(block);
3224 if (frag == NULL) {
3225 return (chunk_freed);
3226 }
3227 frag_strt = ntohs(frag->start);
3228 frag_end = ntohs(frag->end);
3229
3230 if (frag_strt > frag_end) {
3231 /* This gap report is malformed, skip it. */
3232 continue;
3233 }
3234 if (frag_strt <= prev_frag_end) {
3235 /* This gap report is not in order, so restart. */
3236 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3237 }
3238 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3239 *biggest_tsn_acked = last_tsn + frag_end;
3240 }
3241 if (i < num_seg) {
3242 non_revocable = 0;
3243 } else {
3244 non_revocable = 1;
3245 }
3246 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3247 non_revocable, &num_frs, biggest_newly_acked_tsn,
tuexen9784e9a2011-12-18 13:04:23 +00003248 this_sack_lowest_newack, rto_ok)) {
tuexendd729232011-11-01 23:04:43 +00003249 chunk_freed = 1;
3250 }
3251 prev_frag_end = frag_end;
3252 }
3253 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3254 if (num_frs)
3255 sctp_log_fr(*biggest_tsn_acked,
3256 *biggest_newly_acked_tsn,
3257 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3258 }
3259 return (chunk_freed);
3260}
3261
3262static void
3263sctp_check_for_revoked(struct sctp_tcb *stcb,
3264 struct sctp_association *asoc, uint32_t cumack,
3265 uint32_t biggest_tsn_acked)
3266{
3267 struct sctp_tmit_chunk *tp1;
tuexendd729232011-11-01 23:04:43 +00003268
3269 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003270 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
tuexendd729232011-11-01 23:04:43 +00003271 /*
3272 * ok this guy is either ACK or MARKED. If it is
3273 * ACKED it has been previously acked but not this
3274 * time i.e. revoked. If it is MARKED it was ACK'ed
3275 * again.
3276 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01003277 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
tuexendd729232011-11-01 23:04:43 +00003278 break;
3279 }
3280 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3281 /* it has been revoked */
3282 tp1->sent = SCTP_DATAGRAM_SENT;
3283 tp1->rec.data.chunk_was_revoked = 1;
3284 /* We must add this stuff back in to
3285 * assure timers and such get started.
3286 */
3287 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3288 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3289 tp1->whoTo->flight_size,
3290 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003291 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003292 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003293 }
3294 sctp_flight_size_increase(tp1);
3295 sctp_total_flight_increase(stcb, tp1);
3296 /* We inflate the cwnd to compensate for our
3297 * artificial inflation of the flight_size.
3298 */
3299 tp1->whoTo->cwnd += tp1->book_size;
tuexendd729232011-11-01 23:04:43 +00003300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3301 sctp_log_sack(asoc->last_acked_seq,
3302 cumack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003303 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003304 0,
3305 0,
3306 SCTP_LOG_TSN_REVOKED);
3307 }
3308 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3309 /* it has been re-acked in this SACK */
3310 tp1->sent = SCTP_DATAGRAM_ACKED;
3311 }
3312 }
3313 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3314 break;
3315 }
3316}
3317
3318
3319static void
3320sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3321 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3322{
3323 struct sctp_tmit_chunk *tp1;
3324 int strike_flag = 0;
3325 struct timeval now;
3326 int tot_retrans = 0;
3327 uint32_t sending_seq;
3328 struct sctp_nets *net;
3329 int num_dests_sacked = 0;
3330
3331 /*
3332 * select the sending_seq, this is either the next thing ready to be
3333 * sent but not transmitted, OR, the next seq we assign.
3334 */
3335 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3336 if (tp1 == NULL) {
3337 sending_seq = asoc->sending_seq;
3338 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003339 sending_seq = tp1->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00003340 }
3341
3342 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3343 if ((asoc->sctp_cmt_on_off > 0) &&
3344 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3345 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3346 if (net->saw_newack)
3347 num_dests_sacked++;
3348 }
3349 }
t00fcxen0e78cef2014-08-02 22:05:33 +00003350 if (stcb->asoc.prsctp_supported) {
tuexendd729232011-11-01 23:04:43 +00003351 (void)SCTP_GETTIME_TIMEVAL(&now);
3352 }
3353 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3354 strike_flag = 0;
3355 if (tp1->no_fr_allowed) {
3356 /* this one had a timeout or something */
3357 continue;
3358 }
3359 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3360 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3361 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003362 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003363 tp1->sent,
3364 SCTP_FR_LOG_CHECK_STRIKE);
3365 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01003366 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
tuexendd729232011-11-01 23:04:43 +00003367 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3368 /* done */
3369 break;
3370 }
t00fcxen0e78cef2014-08-02 22:05:33 +00003371 if (stcb->asoc.prsctp_supported) {
tuexendd729232011-11-01 23:04:43 +00003372 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3373 /* Is it expired? */
3374#ifndef __FreeBSD__
3375 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3376#else
3377 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3378#endif
3379 /* Yes so drop it */
3380 if (tp1->data != NULL) {
tuexenda53ff02012-05-14 09:00:59 +00003381 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
tuexendd729232011-11-01 23:04:43 +00003382 SCTP_SO_NOT_LOCKED);
3383 }
3384 continue;
3385 }
3386 }
3387
3388 }
Michael Tuexen83714a82018-01-16 23:02:09 +01003389 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3390 !(accum_moved && asoc->fast_retran_loss_recovery)) {
tuexendd729232011-11-01 23:04:43 +00003391 /* we are beyond the tsn in the sack */
3392 break;
3393 }
3394 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3395 /* either a RESEND, ACKED, or MARKED */
3396 /* skip */
3397 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3398 /* Continue strikin FWD-TSN chunks */
3399 tp1->rec.data.fwd_tsn_cnt++;
3400 }
3401 continue;
3402 }
3403 /*
3404 * CMT : SFR algo (covers part of DAC and HTNA as well)
3405 */
3406 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3407 /*
3408 * No new acks were receieved for data sent to this
3409 * dest. Therefore, according to the SFR algo for
3410 * CMT, no data sent to this dest can be marked for
3411 * FR using this SACK.
3412 */
3413 continue;
Michael Tuexen83714a82018-01-16 23:02:09 +01003414 } else if (tp1->whoTo &&
3415 SCTP_TSN_GT(tp1->rec.data.tsn,
3416 tp1->whoTo->this_sack_highest_newack) &&
3417 !(accum_moved && asoc->fast_retran_loss_recovery)) {
tuexendd729232011-11-01 23:04:43 +00003418 /*
3419 * CMT: New acks were receieved for data sent to
3420 * this dest. But no new acks were seen for data
3421 * sent after tp1. Therefore, according to the SFR
3422 * algo for CMT, tp1 cannot be marked for FR using
3423 * this SACK. This step covers part of the DAC algo
3424 * and the HTNA algo as well.
3425 */
3426 continue;
3427 }
3428 /*
3429 * Here we check to see if we were have already done a FR
3430 * and if so we see if the biggest TSN we saw in the sack is
3431 * smaller than the recovery point. If so we don't strike
3432 * the tsn... otherwise we CAN strike the TSN.
3433 */
3434 /*
3435 * @@@ JRI: Check for CMT
3436 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3437 */
3438 if (accum_moved && asoc->fast_retran_loss_recovery) {
3439 /*
3440 * Strike the TSN if in fast-recovery and cum-ack
3441 * moved.
3442 */
3443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3444 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003445 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003446 tp1->sent,
3447 SCTP_FR_LOG_STRIKE_CHUNK);
3448 }
3449 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3450 tp1->sent++;
3451 }
3452 if ((asoc->sctp_cmt_on_off > 0) &&
3453 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3454 /*
3455 * CMT DAC algorithm: If SACK flag is set to
3456 * 0, then lowest_newack test will not pass
3457 * because it would have been set to the
3458 * cumack earlier. If not already to be
3459 * rtx'd, If not a mixed sack and if tp1 is
3460 * not between two sacked TSNs, then mark by
3461 * one more.
3462 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3463 * two packets have been received after this missing TSN.
3464 */
3465 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01003466 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3468 sctp_log_fr(16 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003469 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003470 tp1->sent,
3471 SCTP_FR_LOG_STRIKE_CHUNK);
3472 }
3473 tp1->sent++;
3474 }
3475 }
3476 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3477 (asoc->sctp_cmt_on_off == 0)) {
3478 /*
3479 * For those that have done a FR we must take
3480 * special consideration if we strike. I.e the
3481 * biggest_newly_acked must be higher than the
3482 * sending_seq at the time we did the FR.
3483 */
3484 if (
3485#ifdef SCTP_FR_TO_ALTERNATE
3486 /*
3487 * If FR's go to new networks, then we must only do
3488 * this for singly homed asoc's. However if the FR's
3489 * go to the same network (Armando's work) then its
3490 * ok to FR multiple times.
3491 */
3492 (asoc->numnets < 2)
3493#else
3494 (1)
3495#endif
3496 ) {
3497
3498 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3499 tp1->rec.data.fast_retran_tsn)) {
3500 /*
3501 * Strike the TSN, since this ack is
3502 * beyond where things were when we
3503 * did a FR.
3504 */
3505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3506 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003507 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003508 tp1->sent,
3509 SCTP_FR_LOG_STRIKE_CHUNK);
3510 }
3511 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3512 tp1->sent++;
3513 }
3514 strike_flag = 1;
3515 if ((asoc->sctp_cmt_on_off > 0) &&
3516 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3517 /*
3518 * CMT DAC algorithm: If
3519 * SACK flag is set to 0,
3520 * then lowest_newack test
3521 * will not pass because it
3522 * would have been set to
3523 * the cumack earlier. If
3524 * not already to be rtx'd,
3525 * If not a mixed sack and
3526 * if tp1 is not between two
3527 * sacked TSNs, then mark by
3528 * one more.
3529 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3530 * two packets have been received after this missing TSN.
3531 */
3532 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3533 (num_dests_sacked == 1) &&
3534 SCTP_TSN_GT(this_sack_lowest_newack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003535 tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003536 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3537 sctp_log_fr(32 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003538 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003539 tp1->sent,
3540 SCTP_FR_LOG_STRIKE_CHUNK);
3541 }
3542 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3543 tp1->sent++;
3544 }
3545 }
3546 }
3547 }
3548 }
3549 /*
3550 * JRI: TODO: remove code for HTNA algo. CMT's
3551 * SFR algo covers HTNA.
3552 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01003553 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003554 biggest_tsn_newly_acked)) {
3555 /*
3556 * We don't strike these: This is the HTNA
3557 * algorithm i.e. we don't strike If our TSN is
3558 * larger than the Highest TSN Newly Acked.
3559 */
3560 ;
3561 } else {
3562 /* Strike the TSN */
3563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3564 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003565 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003566 tp1->sent,
3567 SCTP_FR_LOG_STRIKE_CHUNK);
3568 }
3569 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3570 tp1->sent++;
3571 }
3572 if ((asoc->sctp_cmt_on_off > 0) &&
3573 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3574 /*
3575 * CMT DAC algorithm: If SACK flag is set to
3576 * 0, then lowest_newack test will not pass
3577 * because it would have been set to the
3578 * cumack earlier. If not already to be
3579 * rtx'd, If not a mixed sack and if tp1 is
3580 * not between two sacked TSNs, then mark by
3581 * one more.
3582 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3583 * two packets have been received after this missing TSN.
3584 */
3585 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01003586 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003587 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3588 sctp_log_fr(48 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003589 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003590 tp1->sent,
3591 SCTP_FR_LOG_STRIKE_CHUNK);
3592 }
3593 tp1->sent++;
3594 }
3595 }
3596 }
3597 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3598 struct sctp_nets *alt;
3599
3600 /* fix counts and things */
3601 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3602 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3603 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3604 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003605 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003606 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003607 }
3608 if (tp1->whoTo) {
3609 tp1->whoTo->net_ack++;
3610 sctp_flight_size_decrease(tp1);
3611 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3612 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3613 tp1);
3614 }
3615 }
tuexen15f99d82012-04-19 16:08:38 +00003616
tuexendd729232011-11-01 23:04:43 +00003617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3618 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3619 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3620 }
3621 /* add back to the rwnd */
3622 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
tuexen15f99d82012-04-19 16:08:38 +00003623
tuexendd729232011-11-01 23:04:43 +00003624 /* remove from the total flight */
3625 sctp_total_flight_decrease(stcb, tp1);
3626
t00fcxen0e78cef2014-08-02 22:05:33 +00003627 if ((stcb->asoc.prsctp_supported) &&
tuexendd729232011-11-01 23:04:43 +00003628 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3629 /* Has it been retransmitted tv_sec times? - we store the retran count there. */
3630 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3631 /* Yes, so drop it */
3632 if (tp1->data != NULL) {
tuexenda53ff02012-05-14 09:00:59 +00003633 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
tuexendd729232011-11-01 23:04:43 +00003634 SCTP_SO_NOT_LOCKED);
3635 }
3636 /* Make sure to flag we had a FR */
Michael Tuexen4d933602018-05-06 16:23:44 +02003637 if (tp1->whoTo != NULL) {
3638 tp1->whoTo->net_ack++;
3639 }
tuexendd729232011-11-01 23:04:43 +00003640 continue;
3641 }
tuexen15f99d82012-04-19 16:08:38 +00003642 }
tuexencb5fe8d2012-05-04 09:50:27 +00003643 /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
tuexendd729232011-11-01 23:04:43 +00003644 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003645 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
tuexendd729232011-11-01 23:04:43 +00003646 0, SCTP_FR_MARKED);
3647 }
3648 if (strike_flag) {
3649 /* This is a subsequent FR */
3650 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3651 }
3652 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3653 if (asoc->sctp_cmt_on_off > 0) {
3654 /*
3655 * CMT: Using RTX_SSTHRESH policy for CMT.
3656 * If CMT is being used, then pick dest with
3657 * largest ssthresh for any retransmission.
3658 */
3659 tp1->no_fr_allowed = 1;
3660 alt = tp1->whoTo;
3661 /*sa_ignore NO_NULL_CHK*/
3662 if (asoc->sctp_cmt_pf > 0) {
3663 /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3664 alt = sctp_find_alternate_net(stcb, alt, 2);
3665 } else {
3666 /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3667 /*sa_ignore NO_NULL_CHK*/
3668 alt = sctp_find_alternate_net(stcb, alt, 1);
3669 }
3670 if (alt == NULL) {
3671 alt = tp1->whoTo;
3672 }
3673 /*
3674 * CUCv2: If a different dest is picked for
3675 * the retransmission, then new
3676 * (rtx-)pseudo_cumack needs to be tracked
3677 * for orig dest. Let CUCv2 track new (rtx-)
3678 * pseudo-cumack always.
3679 */
3680 if (tp1->whoTo) {
3681 tp1->whoTo->find_pseudo_cumack = 1;
3682 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3683 }
3684
3685 } else {/* CMT is OFF */
3686
3687#ifdef SCTP_FR_TO_ALTERNATE
3688 /* Can we find an alternate? */
3689 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3690#else
3691 /*
3692 * default behavior is to NOT retransmit
3693 * FR's to an alternate. Armando Caro's
3694 * paper details why.
3695 */
3696 alt = tp1->whoTo;
3697#endif
3698 }
3699
3700 tp1->rec.data.doing_fast_retransmit = 1;
3701 tot_retrans++;
3702 /* mark the sending seq for possible subsequent FR's */
3703 /*
tuexencb5fe8d2012-05-04 09:50:27 +00003704 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01003705 * (uint32_t)tpi->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003706 */
3707 if (TAILQ_EMPTY(&asoc->send_queue)) {
3708 /*
3709 * If the queue of send is empty then its
3710 * the next sequence number that will be
3711 * assigned so we subtract one from this to
3712 * get the one we last sent.
3713 */
3714 tp1->rec.data.fast_retran_tsn = sending_seq;
3715 } else {
3716 /*
3717 * If there are chunks on the send queue
3718 * (unsent data that has made it from the
3719 * stream queues but not out the door, we
3720 * take the first one (which will have the
3721 * lowest TSN) and subtract one to get the
3722 * one we last sent.
3723 */
3724 struct sctp_tmit_chunk *ttt;
3725
3726 ttt = TAILQ_FIRST(&asoc->send_queue);
3727 tp1->rec.data.fast_retran_tsn =
Michael Tuexen00657ac2016-12-07 21:53:26 +01003728 ttt->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00003729 }
3730
3731 if (tp1->do_rtt) {
3732 /*
3733 * this guy had a RTO calculation pending on
3734 * it, cancel it
3735 */
tuexen63fc0bb2011-12-27 12:24:52 +00003736 if ((tp1->whoTo != NULL) &&
3737 (tp1->whoTo->rto_needed == 0)) {
tuexendd729232011-11-01 23:04:43 +00003738 tp1->whoTo->rto_needed = 1;
3739 }
3740 tp1->do_rtt = 0;
3741 }
3742 if (alt != tp1->whoTo) {
3743 /* yes, there is an alternate. */
3744 sctp_free_remote_addr(tp1->whoTo);
3745 /*sa_ignore FREED_MEMORY*/
3746 tp1->whoTo = alt;
3747 atomic_add_int(&alt->ref_count, 1);
3748 }
3749 }
3750 }
3751}
3752
/*
 * Walk the front of the sent queue and advance the PR-SCTP
 * "Advanced Peer Ack Point" (RFC 3758) past chunks that have been
 * abandoned (FORWARD_TSN_SKIP), NR-acked, or timed-out resends.
 *
 * Returns the last chunk the ack point could be advanced to (the one
 * whose TSN now equals asoc->advanced_peer_ack_point), or NULL if
 * PR-SCTP is not in use or no advancement was possible.  The caller
 * uses the returned chunk to build a FORWARD-TSN.
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;	/* lazily fetch wall time only if a TTL check is needed */

	if (asoc->prsctp_supported == 0) {
		/* Peer does not support PR-SCTP: nothing to advance. */
		return (NULL);
	}
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    asoc->advanced_peer_ack_point,
				    tp1->rec.data.tsn, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			/* First chunk needing a time check: sample the clock once. */
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
			/* FreeBSD spells the timeval comparison macro differently. */
#ifndef __FreeBSD__
			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
				/* Yes so drop it */
				if (tp1->data) {
					/*
					 * Abandon the chunk; this updates
					 * tp1->sent so the skip test below
					 * can advance past it.
					 */
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when hit one for resend
				 * whos time as not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* advance PeerAckPoint goes forward */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
				a_adv = tp1;
			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
	}
	return (a_adv);
}
3845
3846static int
3847sctp_fs_audit(struct sctp_association *asoc)
3848{
3849 struct sctp_tmit_chunk *chk;
tuexen63fc0bb2011-12-27 12:24:52 +00003850 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
t00fcxen006c3bc2015-05-28 14:33:28 +00003851 int ret;
3852#ifndef INVARIANTS
3853 int entry_flight, entry_cnt;
3854#endif
t00fcxen2ea88ad2014-02-20 20:24:25 +00003855
t00fcxen006c3bc2015-05-28 14:33:28 +00003856 ret = 0;
3857#ifndef INVARIANTS
tuexendd729232011-11-01 23:04:43 +00003858 entry_flight = asoc->total_flight;
3859 entry_cnt = asoc->total_flight_count;
t00fcxen006c3bc2015-05-28 14:33:28 +00003860#endif
tuexendd729232011-11-01 23:04:43 +00003861 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3862 return (0);
3863
3864 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3865 if (chk->sent < SCTP_DATAGRAM_RESEND) {
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003866 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01003867 chk->rec.data.tsn,
tuexencb5fe8d2012-05-04 09:50:27 +00003868 chk->send_size,
3869 chk->snd_count);
tuexendd729232011-11-01 23:04:43 +00003870 inflight++;
3871 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3872 resend++;
3873 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3874 inbetween++;
3875 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3876 above++;
3877 } else {
3878 acked++;
3879 }
3880 }
3881
3882 if ((inflight > 0) || (inbetween > 0)) {
3883#ifdef INVARIANTS
3884 panic("Flight size-express incorrect? \n");
3885#else
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003886 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
tuexencb5fe8d2012-05-04 09:50:27 +00003887 entry_flight, entry_cnt);
tuexendd729232011-11-01 23:04:43 +00003888
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003889 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
tuexendd729232011-11-01 23:04:43 +00003890 inflight, inbetween, resend, above, acked);
3891 ret = 1;
3892#endif
3893 }
3894 return (ret);
3895}
3896
3897
/*
 * Undo the in-flight accounting for a window-probe chunk and mark it
 * for retransmission.  Called when a window probe must be recovered
 * (e.g. the peer's rwnd reopened without acking the probe).
 *
 * Ordering matters: the flight-size/total-flight decrease must happen
 * while tp1->sent is still below RESEND, before the chunk is re-marked.
 */
static void
sctp_window_probe_recovery(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1)
{
	tp1->window_probe = 0;
	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
		/* TSN's skipped we do NOT move back. */
		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
		    tp1->book_size,
		    (uint32_t)(uintptr_t)tp1->whoTo,
		    tp1->rec.data.tsn);
		return;
	}
	/* First setup this by shrinking flight */
	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
		/* Give the CC module a chance to account for the un-acked TSN. */
		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
		    tp1);
	}
	sctp_flight_size_decrease(tp1);
	sctp_total_flight_decrease(stcb, tp1);
	/* Now mark for resend */
	tp1->sent = SCTP_DATAGRAM_RESEND;
	sctp_ucount_incr(asoc->sent_queue_retran_cnt);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
		/*
		 * NOTE(review): tp1->whoTo is dereferenced here without the
		 * NULL guard used in the early-return path above — presumably
		 * whoTo is non-NULL for any chunk with data still in flight;
		 * confirm against callers.
		 */
		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
		    tp1->whoTo->flight_size,
		    tp1->book_size,
		    (uint32_t)(uintptr_t)tp1->whoTo,
		    tp1->rec.data.tsn);
	}
}
3932
3933void
3934sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3935 uint32_t rwnd, int *abort_now, int ecne_seen)
3936{
3937 struct sctp_nets *net;
3938 struct sctp_association *asoc;
3939 struct sctp_tmit_chunk *tp1, *tp2;
3940 uint32_t old_rwnd;
3941 int win_probe_recovery = 0;
3942 int win_probe_recovered = 0;
3943 int j, done_once = 0;
tuexen63fc0bb2011-12-27 12:24:52 +00003944 int rto_ok = 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02003945 uint32_t send_s;
tuexendd729232011-11-01 23:04:43 +00003946
3947 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3948 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3949 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3950 }
3951 SCTP_TCB_LOCK_ASSERT(stcb);
3952#ifdef SCTP_ASOCLOG_OF_TSNS
3953 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3954 stcb->asoc.cumack_log_at++;
3955 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3956 stcb->asoc.cumack_log_at = 0;
3957 }
3958#endif
3959 asoc = &stcb->asoc;
3960 old_rwnd = asoc->peers_rwnd;
3961 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3962 /* old ack */
3963 return;
3964 } else if (asoc->last_acked_seq == cumack) {
3965 /* Window update sack */
3966 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3967 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3968 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3969 /* SWS sender side engages */
3970 asoc->peers_rwnd = 0;
3971 }
3972 if (asoc->peers_rwnd > old_rwnd) {
3973 goto again;
3974 }
3975 return;
3976 }
3977
3978 /* First setup for CC stuff */
3979 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3980 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3981 /* Drag along the window_tsn for cwr's */
3982 net->cwr_window_tsn = cumack;
3983 }
3984 net->prev_cwnd = net->cwnd;
3985 net->net_ack = 0;
3986 net->net_ack2 = 0;
3987
3988 /*
3989 * CMT: Reset CUC and Fast recovery algo variables before
3990 * SACK processing
3991 */
3992 net->new_pseudo_cumack = 0;
3993 net->will_exit_fast_recovery = 0;
3994 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3995 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3996 }
3997 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02003998 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3999 tp1 = TAILQ_LAST(&asoc->sent_queue,
4000 sctpchunk_listhead);
Michael Tuexen00657ac2016-12-07 21:53:26 +01004001 send_s = tp1->rec.data.tsn + 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02004002 } else {
4003 send_s = asoc->sending_seq;
4004 }
4005 if (SCTP_TSN_GE(cumack, send_s)) {
4006 struct mbuf *op_err;
4007 char msg[SCTP_DIAG_INFO_LEN];
tuexendd729232011-11-01 23:04:43 +00004008
Michael Tuexen0ec21502016-05-12 18:39:01 +02004009 *abort_now = 1;
4010 /* XXX */
Michael Tuexenedd369d2020-05-19 09:42:15 +02004011 SCTP_SNPRINTF(msg, sizeof(msg),
4012 "Cum ack %8.8x greater or equal than TSN %8.8x",
4013 cumack, send_s);
Michael Tuexen0ec21502016-05-12 18:39:01 +02004014 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen91565952020-02-03 23:23:28 +01004015 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
Michael Tuexen0ec21502016-05-12 18:39:01 +02004016 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4017 return;
tuexendd729232011-11-01 23:04:43 +00004018 }
4019 asoc->this_sack_highest_gap = cumack;
4020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4021 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4022 stcb->asoc.overall_error_count,
4023 0,
4024 SCTP_FROM_SCTP_INDATA,
4025 __LINE__);
4026 }
4027 stcb->asoc.overall_error_count = 0;
4028 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4029 /* process the new consecutive TSN first */
4030 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004031 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00004032 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
tuexencb5fe8d2012-05-04 09:50:27 +00004033 SCTP_PRINTF("Warning, an unsent is now acked?\n");
tuexendd729232011-11-01 23:04:43 +00004034 }
4035 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4036 /*
4037 * If it is less than ACKED, it is
4038 * now no-longer in flight. Higher
4039 * values may occur during marking
4040 */
4041 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4043 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4044 tp1->whoTo->flight_size,
4045 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004046 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004047 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004048 }
4049 sctp_flight_size_decrease(tp1);
4050 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4051 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4052 tp1);
4053 }
4054 /* sa_ignore NO_NULL_CHK */
4055 sctp_total_flight_decrease(stcb, tp1);
4056 }
4057 tp1->whoTo->net_ack += tp1->send_size;
4058 if (tp1->snd_count < 2) {
4059 /*
Michael Tuexenc51af972018-08-12 15:32:55 +02004060 * True non-retransmitted
tuexendd729232011-11-01 23:04:43 +00004061 * chunk
4062 */
4063 tp1->whoTo->net_ack2 +=
4064 tp1->send_size;
4065
4066 /* update RTO too? */
4067 if (tp1->do_rtt) {
Michael Tuexenb7ed78b2019-09-22 12:48:36 +02004068 if (rto_ok &&
4069 sctp_calculate_rto(stcb,
4070 &stcb->asoc,
4071 tp1->whoTo,
4072 &tp1->sent_rcv_time,
4073 SCTP_RTT_FROM_DATA)) {
tuexendd729232011-11-01 23:04:43 +00004074 rto_ok = 0;
4075 }
4076 if (tp1->whoTo->rto_needed == 0) {
4077 tp1->whoTo->rto_needed = 1;
4078 }
4079 tp1->do_rtt = 0;
4080 }
4081 }
4082 /*
4083 * CMT: CUCv2 algorithm. From the
4084 * cumack'd TSNs, for each TSN being
4085 * acked for the first time, set the
4086 * following variables for the
4087 * corresp destination.
4088 * new_pseudo_cumack will trigger a
4089 * cwnd update.
4090 * find_(rtx_)pseudo_cumack will
4091 * trigger search for the next
4092 * expected (rtx-)pseudo-cumack.
4093 */
4094 tp1->whoTo->new_pseudo_cumack = 1;
4095 tp1->whoTo->find_pseudo_cumack = 1;
4096 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4097
4098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4099 /* sa_ignore NO_NULL_CHK */
Michael Tuexen00657ac2016-12-07 21:53:26 +01004100 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004101 }
4102 }
4103 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4104 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4105 }
4106 if (tp1->rec.data.chunk_was_revoked) {
4107 /* deflate the cwnd */
4108 tp1->whoTo->cwnd -= tp1->book_size;
4109 tp1->rec.data.chunk_was_revoked = 0;
4110 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004111 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004112 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4113 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
t00fcxen9ad90772012-11-07 22:19:57 +00004114#ifdef INVARIANTS
4115 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004116 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
t00fcxen9ad90772012-11-07 22:19:57 +00004117#endif
4118 }
4119 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01004120 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4121 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4122 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
Michael Tuexenc0a12d12015-12-03 16:30:24 +01004123 asoc->trigger_reset = 1;
4124 }
tuexendd729232011-11-01 23:04:43 +00004125 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4126 if (tp1->data) {
4127 /* sa_ignore NO_NULL_CHK */
4128 sctp_free_bufspace(stcb, asoc, tp1, 1);
4129 sctp_m_freem(tp1->data);
4130 tp1->data = NULL;
4131 }
4132 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4133 sctp_log_sack(asoc->last_acked_seq,
4134 cumack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004135 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004136 0,
4137 0,
4138 SCTP_LOG_FREE_SENT);
4139 }
4140 asoc->sent_queue_cnt--;
4141 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4142 } else {
4143 break;
4144 }
4145 }
4146
4147 }
tuexen6bffa9a2012-06-25 17:40:03 +00004148#if defined(__Userspace__)
tuexen98456cf2012-04-19 15:37:07 +00004149 if (stcb->sctp_ep->recv_callback) {
4150 if (stcb->sctp_socket) {
4151 uint32_t inqueue_bytes, sb_free_now;
4152 struct sctp_inpcb *inp;
tuexen749d8562011-11-13 13:41:49 +00004153
tuexen98456cf2012-04-19 15:37:07 +00004154 inp = stcb->sctp_ep;
tuexen749d8562011-11-13 13:41:49 +00004155 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
tuexen98456cf2012-04-19 15:37:07 +00004156 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4157
4158 /* check if the amount free in the send socket buffer crossed the threshold */
4159 if (inp->send_callback &&
4160 (((inp->send_sb_threshold > 0) &&
4161 (sb_free_now >= inp->send_sb_threshold) &&
4162 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4163 (inp->send_sb_threshold == 0))) {
4164 atomic_add_int(&stcb->asoc.refcnt, 1);
4165 SCTP_TCB_UNLOCK(stcb);
4166 inp->send_callback(stcb->sctp_socket, sb_free_now);
4167 SCTP_TCB_LOCK(stcb);
4168 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4169 }
tuexen749d8562011-11-13 13:41:49 +00004170 }
tuexen98456cf2012-04-19 15:37:07 +00004171 } else if (stcb->sctp_socket) {
tuexen749d8562011-11-13 13:41:49 +00004172#else
tuexendd729232011-11-01 23:04:43 +00004173 /* sa_ignore NO_NULL_CHK */
4174 if (stcb->sctp_socket) {
tuexen98456cf2012-04-19 15:37:07 +00004175#endif
tuexen6bffa9a2012-06-25 17:40:03 +00004176#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004177 struct socket *so;
4178
4179#endif
4180 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4182 /* sa_ignore NO_NULL_CHK */
tuexen9784e9a2011-12-18 13:04:23 +00004183 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004184 }
tuexen6bffa9a2012-06-25 17:40:03 +00004185#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004186 so = SCTP_INP_SO(stcb->sctp_ep);
4187 atomic_add_int(&stcb->asoc.refcnt, 1);
4188 SCTP_TCB_UNLOCK(stcb);
4189 SCTP_SOCKET_LOCK(so, 1);
4190 SCTP_TCB_LOCK(stcb);
4191 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4192 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4193 /* assoc was freed while we were unlocked */
4194 SCTP_SOCKET_UNLOCK(so, 1);
4195 return;
4196 }
4197#endif
4198 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
tuexen6bffa9a2012-06-25 17:40:03 +00004199#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004200 SCTP_SOCKET_UNLOCK(so, 1);
4201#endif
4202 } else {
4203 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004204 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004205 }
4206 }
4207
4208 /* JRS - Use the congestion control given in the CC module */
4209 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4210 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4211 if (net->net_ack2 > 0) {
4212 /*
4213 * Karn's rule applies to clearing error count, this
4214 * is optional.
4215 */
4216 net->error_count = 0;
4217 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4218 /* addr came good */
4219 net->dest_state |= SCTP_ADDR_REACHABLE;
4220 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
tuexenda53ff02012-05-14 09:00:59 +00004221 0, (void *)net, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00004222 }
4223 if (net == stcb->asoc.primary_destination) {
4224 if (stcb->asoc.alternate) {
4225 /* release the alternate, primary is good */
4226 sctp_free_remote_addr(stcb->asoc.alternate);
4227 stcb->asoc.alternate = NULL;
4228 }
4229 }
4230 if (net->dest_state & SCTP_ADDR_PF) {
4231 net->dest_state &= ~SCTP_ADDR_PF;
t00fcxen0057a6d2015-05-28 16:42:49 +00004232 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4233 stcb->sctp_ep, stcb, net,
Michael Tuexen91565952020-02-03 23:23:28 +01004234 SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
tuexendd729232011-11-01 23:04:43 +00004235 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4236 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4237 /* Done with this net */
4238 net->net_ack = 0;
4239 }
4240 /* restore any doubled timers */
4241 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4242 if (net->RTO < stcb->asoc.minrto) {
4243 net->RTO = stcb->asoc.minrto;
4244 }
4245 if (net->RTO > stcb->asoc.maxrto) {
4246 net->RTO = stcb->asoc.maxrto;
4247 }
4248 }
4249 }
4250 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4251 }
4252 asoc->last_acked_seq = cumack;
4253
4254 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4255 /* nothing left in-flight */
4256 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4257 net->flight_size = 0;
4258 net->partial_bytes_acked = 0;
4259 }
4260 asoc->total_flight = 0;
4261 asoc->total_flight_count = 0;
4262 }
4263
4264 /* RWND update */
4265 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4266 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4267 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4268 /* SWS sender side engages */
4269 asoc->peers_rwnd = 0;
4270 }
4271 if (asoc->peers_rwnd > old_rwnd) {
4272 win_probe_recovery = 1;
4273 }
4274 /* Now assure a timer where data is queued at */
4275again:
4276 j = 0;
4277 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
tuexendd729232011-11-01 23:04:43 +00004278 if (win_probe_recovery && (net->window_probe)) {
4279 win_probe_recovered = 1;
4280 /*
4281 * Find first chunk that was used with window probe
4282 * and clear the sent
4283 */
4284 /* sa_ignore FREED_MEMORY */
4285 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4286 if (tp1->window_probe) {
4287 /* move back to data send queue */
tuexen9784e9a2011-12-18 13:04:23 +00004288 sctp_window_probe_recovery(stcb, asoc, tp1);
tuexendd729232011-11-01 23:04:43 +00004289 break;
4290 }
4291 }
4292 }
tuexendd729232011-11-01 23:04:43 +00004293 if (net->flight_size) {
4294 j++;
Michael Tuexena7360a12017-09-17 11:30:34 +02004295 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
tuexendd729232011-11-01 23:04:43 +00004296 if (net->window_probe) {
4297 net->window_probe = 0;
4298 }
4299 } else {
4300 if (net->window_probe) {
4301 /* In window probes we must assure a timer is still running there */
4302 net->window_probe = 0;
4303 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
Michael Tuexena7360a12017-09-17 11:30:34 +02004304 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
tuexendd729232011-11-01 23:04:43 +00004305 }
4306 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4307 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4308 stcb, net,
Michael Tuexen91565952020-02-03 23:23:28 +01004309 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
tuexendd729232011-11-01 23:04:43 +00004310 }
4311 }
4312 }
4313 if ((j == 0) &&
4314 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4315 (asoc->sent_queue_retran_cnt == 0) &&
4316 (win_probe_recovered == 0) &&
4317 (done_once == 0)) {
4318 /* huh, this should not happen unless all packets
4319 * are PR-SCTP and marked to skip of course.
4320 */
4321 if (sctp_fs_audit(asoc)) {
4322 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4323 net->flight_size = 0;
4324 }
4325 asoc->total_flight = 0;
4326 asoc->total_flight_count = 0;
4327 asoc->sent_queue_retran_cnt = 0;
4328 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4329 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4330 sctp_flight_size_increase(tp1);
4331 sctp_total_flight_increase(stcb, tp1);
4332 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4333 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4334 }
4335 }
4336 }
4337 done_once = 1;
4338 goto again;
4339 }
4340 /**********************************/
4341 /* Now what about shutdown issues */
4342 /**********************************/
4343 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4344 /* nothing left on sendqueue.. consider done */
4345 /* clean up */
4346 if ((asoc->stream_queue_cnt == 1) &&
4347 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02004348 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexenfdcf7902016-08-06 14:39:31 +02004349 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
Michael Tuexen348a36c2018-08-13 16:24:47 +02004350 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
tuexendd729232011-11-01 23:04:43 +00004351 }
Michael Tuexen74842cb2017-07-20 13:15:46 +02004352 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02004353 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexen74842cb2017-07-20 13:15:46 +02004354 (asoc->stream_queue_cnt == 1) &&
4355 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4356 struct mbuf *op_err;
4357
4358 *abort_now = 1;
4359 /* XXX */
4360 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
Michael Tuexen91565952020-02-03 23:23:28 +01004361 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
Michael Tuexen74842cb2017-07-20 13:15:46 +02004362 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4363 return;
4364 }
tuexendd729232011-11-01 23:04:43 +00004365 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4366 (asoc->stream_queue_cnt == 0)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02004367 struct sctp_nets *netp;
tuexendd729232011-11-01 23:04:43 +00004368
Michael Tuexen348a36c2018-08-13 16:24:47 +02004369 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4370 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02004371 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
tuexendd729232011-11-01 23:04:43 +00004372 }
Michael Tuexen348a36c2018-08-13 16:24:47 +02004373 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
Michael Tuexen74842cb2017-07-20 13:15:46 +02004374 sctp_stop_timers_for_shutdown(stcb);
4375 if (asoc->alternate) {
4376 netp = asoc->alternate;
4377 } else {
4378 netp = asoc->primary_destination;
4379 }
4380 sctp_send_shutdown(stcb, netp);
4381 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4382 stcb->sctp_ep, stcb, netp);
4383 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
Michael Tuexend07a5f22020-03-19 23:34:46 +01004384 stcb->sctp_ep, stcb, NULL);
Michael Tuexen348a36c2018-08-13 16:24:47 +02004385 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
tuexendd729232011-11-01 23:04:43 +00004386 (asoc->stream_queue_cnt == 0)) {
4387 struct sctp_nets *netp;
t00fcxend0ad16b2013-02-09 18:34:24 +00004388
tuexendd729232011-11-01 23:04:43 +00004389 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
Michael Tuexen348a36c2018-08-13 16:24:47 +02004390 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
tuexendd729232011-11-01 23:04:43 +00004391 sctp_stop_timers_for_shutdown(stcb);
t00fcxend0ad16b2013-02-09 18:34:24 +00004392 if (asoc->alternate) {
4393 netp = asoc->alternate;
4394 } else {
4395 netp = asoc->primary_destination;
4396 }
4397 sctp_send_shutdown_ack(stcb, netp);
tuexendd729232011-11-01 23:04:43 +00004398 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4399 stcb->sctp_ep, stcb, netp);
4400 }
4401 }
4402 /*********************************************/
4403 /* Here we perform PR-SCTP procedures */
4404 /* (section 4.2) */
4405 /*********************************************/
4406 /* C1. update advancedPeerAckPoint */
4407 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4408 asoc->advanced_peer_ack_point = cumack;
4409 }
4410 /* PR-Sctp issues need to be addressed too */
t00fcxen0e78cef2014-08-02 22:05:33 +00004411 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
tuexendd729232011-11-01 23:04:43 +00004412 struct sctp_tmit_chunk *lchk;
4413 uint32_t old_adv_peer_ack_point;
tuexen15f99d82012-04-19 16:08:38 +00004414
tuexendd729232011-11-01 23:04:43 +00004415 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4416 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4417 /* C3. See if we need to send a Fwd-TSN */
4418 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4419 /*
4420 * ISSUE with ECN, see FWD-TSN processing.
4421 */
4422 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4423 send_forward_tsn(stcb, asoc);
4424 } else if (lchk) {
4425 /* try to FR fwd-tsn's that get lost too */
4426 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4427 send_forward_tsn(stcb, asoc);
4428 }
4429 }
4430 }
Michael Tuexena8f3d9d2020-05-10 19:35:08 +02004431 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4432 if (lchk->whoTo != NULL) {
4433 break;
4434 }
4435 }
4436 if (lchk != NULL) {
tuexendd729232011-11-01 23:04:43 +00004437 /* Assure a timer is up */
4438 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
Michael Tuexena8f3d9d2020-05-10 19:35:08 +02004439 stcb->sctp_ep, stcb, lchk->whoTo);
tuexendd729232011-11-01 23:04:43 +00004440 }
4441 }
4442 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4443 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4444 rwnd,
4445 stcb->asoc.peers_rwnd,
4446 stcb->asoc.total_flight,
4447 stcb->asoc.total_output_queue_size);
4448 }
4449}
4450
4451void
4452sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
tuexen9784e9a2011-12-18 13:04:23 +00004453 struct sctp_tcb *stcb,
tuexendd729232011-11-01 23:04:43 +00004454 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4455 int *abort_now, uint8_t flags,
4456 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4457{
4458 struct sctp_association *asoc;
4459 struct sctp_tmit_chunk *tp1, *tp2;
4460 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
tuexendd729232011-11-01 23:04:43 +00004461 uint16_t wake_him = 0;
4462 uint32_t send_s = 0;
4463 long j;
4464 int accum_moved = 0;
4465 int will_exit_fast_recovery = 0;
4466 uint32_t a_rwnd, old_rwnd;
4467 int win_probe_recovery = 0;
4468 int win_probe_recovered = 0;
4469 struct sctp_nets *net = NULL;
tuexendd729232011-11-01 23:04:43 +00004470 int done_once;
tuexen63fc0bb2011-12-27 12:24:52 +00004471 int rto_ok = 1;
tuexendd729232011-11-01 23:04:43 +00004472 uint8_t reneged_all = 0;
4473 uint8_t cmt_dac_flag;
4474 /*
4475 * we take any chance we can to service our queues since we cannot
4476 * get awoken when the socket is read from :<
4477 */
4478 /*
4479 * Now perform the actual SACK handling: 1) Verify that it is not an
4480 * old sack, if so discard. 2) If there is nothing left in the send
4481 * queue (cum-ack is equal to last acked) then you have a duplicate
4482 * too, update any rwnd change and verify no timers are running.
4483 * then return. 3) Process any new consecutive data i.e. cum-ack
4484 * moved process these first and note that it moved. 4) Process any
4485 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4486 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4487 * sync up flightsizes and things, stop all timers and also check
4488 * for shutdown_pending state. If so then go ahead and send off the
4489 * shutdown. If in shutdown recv, send off the shutdown-ack and
4490 * start that timer, Ret. 9) Strike any non-acked things and do FR
4491 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4492 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4493 * if in shutdown_recv state.
4494 */
4495 SCTP_TCB_LOCK_ASSERT(stcb);
4496 /* CMT DAC algo */
4497 this_sack_lowest_newack = 0;
tuexendd729232011-11-01 23:04:43 +00004498 SCTP_STAT_INCR(sctps_slowpath_sack);
4499 last_tsn = cum_ack;
4500 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4501#ifdef SCTP_ASOCLOG_OF_TSNS
4502 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4503 stcb->asoc.cumack_log_at++;
4504 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4505 stcb->asoc.cumack_log_at = 0;
4506 }
4507#endif
4508 a_rwnd = rwnd;
4509
4510 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4511 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4512 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4513 }
4514
4515 old_rwnd = stcb->asoc.peers_rwnd;
4516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4517 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4518 stcb->asoc.overall_error_count,
4519 0,
4520 SCTP_FROM_SCTP_INDATA,
4521 __LINE__);
4522 }
4523 stcb->asoc.overall_error_count = 0;
4524 asoc = &stcb->asoc;
4525 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4526 sctp_log_sack(asoc->last_acked_seq,
4527 cum_ack,
4528 0,
4529 num_seg,
4530 num_dup,
4531 SCTP_LOG_NEW_SACK);
4532 }
4533 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4534 uint16_t i;
4535 uint32_t *dupdata, dblock;
4536
4537 for (i = 0; i < num_dup; i++) {
4538 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4539 sizeof(uint32_t), (uint8_t *)&dblock);
4540 if (dupdata == NULL) {
4541 break;
4542 }
4543 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4544 }
4545 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004546 /* reality check */
4547 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4548 tp1 = TAILQ_LAST(&asoc->sent_queue,
4549 sctpchunk_listhead);
Michael Tuexen00657ac2016-12-07 21:53:26 +01004550 send_s = tp1->rec.data.tsn + 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02004551 } else {
4552 tp1 = NULL;
4553 send_s = asoc->sending_seq;
4554 }
4555 if (SCTP_TSN_GE(cum_ack, send_s)) {
4556 struct mbuf *op_err;
4557 char msg[SCTP_DIAG_INFO_LEN];
t00fcxen08f9ff92014-03-16 13:38:54 +00004558
Michael Tuexen0ec21502016-05-12 18:39:01 +02004559 /*
4560 * no way, we have not even sent this TSN out yet.
4561 * Peer is hopelessly messed up with us.
4562 */
4563 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4564 cum_ack, send_s);
4565 if (tp1) {
4566 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01004567 tp1->rec.data.tsn, (void *)tp1);
tuexendd729232011-11-01 23:04:43 +00004568 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004569 hopeless_peer:
4570 *abort_now = 1;
4571 /* XXX */
Michael Tuexenedd369d2020-05-19 09:42:15 +02004572 SCTP_SNPRINTF(msg, sizeof(msg),
4573 "Cum ack %8.8x greater or equal than TSN %8.8x",
4574 cum_ack, send_s);
Michael Tuexen0ec21502016-05-12 18:39:01 +02004575 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen91565952020-02-03 23:23:28 +01004576 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
Michael Tuexen0ec21502016-05-12 18:39:01 +02004577 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4578 return;
tuexendd729232011-11-01 23:04:43 +00004579 }
4580 /**********************/
4581 /* 1) check the range */
4582 /**********************/
4583 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4584 /* acking something behind */
4585 return;
4586 }
tuexendd729232011-11-01 23:04:43 +00004587
4588 /* update the Rwnd of the peer */
4589 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4590 TAILQ_EMPTY(&asoc->send_queue) &&
4591 (asoc->stream_queue_cnt == 0)) {
4592 /* nothing left on send/sent and strmq */
4593 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4594 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4595 asoc->peers_rwnd, 0, 0, a_rwnd);
4596 }
4597 asoc->peers_rwnd = a_rwnd;
4598 if (asoc->sent_queue_retran_cnt) {
4599 asoc->sent_queue_retran_cnt = 0;
4600 }
4601 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4602 /* SWS sender side engages */
4603 asoc->peers_rwnd = 0;
4604 }
4605 /* stop any timers */
4606 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4607 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
Michael Tuexen91565952020-02-03 23:23:28 +01004608 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
tuexendd729232011-11-01 23:04:43 +00004609 net->partial_bytes_acked = 0;
4610 net->flight_size = 0;
4611 }
4612 asoc->total_flight = 0;
4613 asoc->total_flight_count = 0;
4614 return;
4615 }
4616 /*
4617 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4618 * things. The total byte count acked is tracked in netAckSz AND
4619 * netAck2 is used to track the total bytes acked that are un-
4620 * amibguious and were never retransmitted. We track these on a per
4621 * destination address basis.
4622 */
4623 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4624 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4625 /* Drag along the window_tsn for cwr's */
4626 net->cwr_window_tsn = cum_ack;
4627 }
4628 net->prev_cwnd = net->cwnd;
4629 net->net_ack = 0;
4630 net->net_ack2 = 0;
4631
4632 /*
4633 * CMT: Reset CUC and Fast recovery algo variables before
4634 * SACK processing
4635 */
4636 net->new_pseudo_cumack = 0;
4637 net->will_exit_fast_recovery = 0;
4638 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4639 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4640 }
Michael Tuexen83714a82018-01-16 23:02:09 +01004641
4642 /*
4643 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4644 * to be greater than the cumack. Also reset saw_newack to 0
4645 * for all dests.
4646 */
4647 net->saw_newack = 0;
4648 net->this_sack_highest_newack = last_tsn;
tuexendd729232011-11-01 23:04:43 +00004649 }
4650 /* process the new consecutive TSN first */
4651 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004652 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00004653 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4654 accum_moved = 1;
4655 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4656 /*
4657 * If it is less than ACKED, it is
4658 * now no-longer in flight. Higher
4659 * values may occur during marking
4660 */
4661 if ((tp1->whoTo->dest_state &
4662 SCTP_ADDR_UNCONFIRMED) &&
4663 (tp1->snd_count < 2)) {
4664 /*
4665 * If there was no retran
4666 * and the address is
4667 * un-confirmed and we sent
4668 * there and are now
4669 * sacked.. its confirmed,
4670 * mark it so.
4671 */
4672 tp1->whoTo->dest_state &=
4673 ~SCTP_ADDR_UNCONFIRMED;
4674 }
4675 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4676 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4677 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4678 tp1->whoTo->flight_size,
4679 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004680 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004681 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004682 }
4683 sctp_flight_size_decrease(tp1);
4684 sctp_total_flight_decrease(stcb, tp1);
4685 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4686 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4687 tp1);
4688 }
4689 }
4690 tp1->whoTo->net_ack += tp1->send_size;
4691
4692 /* CMT SFR and DAC algos */
Michael Tuexen00657ac2016-12-07 21:53:26 +01004693 this_sack_lowest_newack = tp1->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00004694 tp1->whoTo->saw_newack = 1;
4695
4696 if (tp1->snd_count < 2) {
4697 /*
Michael Tuexenc51af972018-08-12 15:32:55 +02004698 * True non-retransmitted
tuexendd729232011-11-01 23:04:43 +00004699 * chunk
4700 */
4701 tp1->whoTo->net_ack2 +=
4702 tp1->send_size;
4703
4704 /* update RTO too? */
4705 if (tp1->do_rtt) {
Michael Tuexenb7ed78b2019-09-22 12:48:36 +02004706 if (rto_ok &&
4707 sctp_calculate_rto(stcb,
4708 &stcb->asoc,
4709 tp1->whoTo,
4710 &tp1->sent_rcv_time,
4711 SCTP_RTT_FROM_DATA)) {
tuexendd729232011-11-01 23:04:43 +00004712 rto_ok = 0;
4713 }
4714 if (tp1->whoTo->rto_needed == 0) {
4715 tp1->whoTo->rto_needed = 1;
4716 }
4717 tp1->do_rtt = 0;
4718 }
4719 }
4720 /*
4721 * CMT: CUCv2 algorithm. From the
4722 * cumack'd TSNs, for each TSN being
4723 * acked for the first time, set the
4724 * following variables for the
4725 * corresp destination.
4726 * new_pseudo_cumack will trigger a
4727 * cwnd update.
4728 * find_(rtx_)pseudo_cumack will
4729 * trigger search for the next
4730 * expected (rtx-)pseudo-cumack.
4731 */
4732 tp1->whoTo->new_pseudo_cumack = 1;
4733 tp1->whoTo->find_pseudo_cumack = 1;
4734 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4735
4736
4737 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4738 sctp_log_sack(asoc->last_acked_seq,
4739 cum_ack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004740 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004741 0,
4742 0,
4743 SCTP_LOG_TSN_ACKED);
4744 }
4745 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004746 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004747 }
4748 }
4749 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4750 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4751#ifdef SCTP_AUDITING_ENABLED
4752 sctp_audit_log(0xB3,
4753 (asoc->sent_queue_retran_cnt & 0x000000ff));
4754#endif
4755 }
4756 if (tp1->rec.data.chunk_was_revoked) {
4757 /* deflate the cwnd */
4758 tp1->whoTo->cwnd -= tp1->book_size;
4759 tp1->rec.data.chunk_was_revoked = 0;
4760 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004761 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4762 tp1->sent = SCTP_DATAGRAM_ACKED;
4763 }
tuexendd729232011-11-01 23:04:43 +00004764 }
4765 } else {
4766 break;
4767 }
4768 }
4769 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4770 /* always set this up to cum-ack */
4771 asoc->this_sack_highest_gap = last_tsn;
4772
4773 if ((num_seg > 0) || (num_nr_seg > 0)) {
4774
4775 /*
tuexendd729232011-11-01 23:04:43 +00004776 * thisSackHighestGap will increase while handling NEW
4777 * segments this_sack_highest_newack will increase while
4778 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4779 * used for CMT DAC algo. saw_newack will also change.
4780 */
4781 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4782 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
tuexen9784e9a2011-12-18 13:04:23 +00004783 num_seg, num_nr_seg, &rto_ok)) {
tuexendd729232011-11-01 23:04:43 +00004784 wake_him++;
4785 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004786 /*
4787 * validate the biggest_tsn_acked in the gap acks if
4788 * strict adherence is wanted.
4789 */
4790 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
tuexendd729232011-11-01 23:04:43 +00004791 /*
Michael Tuexen0ec21502016-05-12 18:39:01 +02004792 * peer is either confused or we are under
4793 * attack. We must abort.
tuexendd729232011-11-01 23:04:43 +00004794 */
Michael Tuexen0ec21502016-05-12 18:39:01 +02004795 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4796 biggest_tsn_acked, send_s);
4797 goto hopeless_peer;
tuexendd729232011-11-01 23:04:43 +00004798 }
4799 }
4800 /*******************************************/
4801 /* cancel ALL T3-send timer if accum moved */
4802 /*******************************************/
4803 if (asoc->sctp_cmt_on_off > 0) {
4804 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4805 if (net->new_pseudo_cumack)
4806 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4807 stcb, net,
Michael Tuexen91565952020-02-03 23:23:28 +01004808 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
tuexendd729232011-11-01 23:04:43 +00004809
4810 }
4811 } else {
4812 if (accum_moved) {
4813 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4814 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
Michael Tuexen91565952020-02-03 23:23:28 +01004815 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
tuexendd729232011-11-01 23:04:43 +00004816 }
4817 }
4818 }
4819 /********************************************/
4820 /* drop the acked chunks from the sentqueue */
4821 /********************************************/
4822 asoc->last_acked_seq = cum_ack;
4823
4824 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004825 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
tuexendd729232011-11-01 23:04:43 +00004826 break;
4827 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004828 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004829 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4830 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
t00fcxen9ad90772012-11-07 22:19:57 +00004831#ifdef INVARIANTS
4832 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004833 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
t00fcxen9ad90772012-11-07 22:19:57 +00004834#endif
4835 }
tuexendd729232011-11-01 23:04:43 +00004836 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01004837 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4838 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4839 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
Michael Tuexenc0a12d12015-12-03 16:30:24 +01004840 asoc->trigger_reset = 1;
4841 }
tuexendd729232011-11-01 23:04:43 +00004842 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
t00fcxen91ceb732013-09-03 19:40:11 +00004843 if (PR_SCTP_ENABLED(tp1->flags)) {
tuexendd729232011-11-01 23:04:43 +00004844 if (asoc->pr_sctp_cnt != 0)
4845 asoc->pr_sctp_cnt--;
4846 }
4847 asoc->sent_queue_cnt--;
4848 if (tp1->data) {
4849 /* sa_ignore NO_NULL_CHK */
4850 sctp_free_bufspace(stcb, asoc, tp1, 1);
4851 sctp_m_freem(tp1->data);
4852 tp1->data = NULL;
t00fcxen0e78cef2014-08-02 22:05:33 +00004853 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
tuexendd729232011-11-01 23:04:43 +00004854 asoc->sent_queue_cnt_removeable--;
4855 }
4856 }
4857 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4858 sctp_log_sack(asoc->last_acked_seq,
4859 cum_ack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004860 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004861 0,
4862 0,
4863 SCTP_LOG_FREE_SENT);
4864 }
4865 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4866 wake_him++;
4867 }
4868 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4869#ifdef INVARIANTS
Michael Tuexen34488e72016-05-03 22:11:59 +02004870 panic("Warning flight size is positive and should be 0");
tuexendd729232011-11-01 23:04:43 +00004871#else
4872 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4873 asoc->total_flight);
4874#endif
4875 asoc->total_flight = 0;
4876 }
4877
tuexen98456cf2012-04-19 15:37:07 +00004878#if defined(__Userspace__)
4879 if (stcb->sctp_ep->recv_callback) {
4880 if (stcb->sctp_socket) {
4881 uint32_t inqueue_bytes, sb_free_now;
4882 struct sctp_inpcb *inp;
tuexen749d8562011-11-13 13:41:49 +00004883
tuexen98456cf2012-04-19 15:37:07 +00004884 inp = stcb->sctp_ep;
tuexen749d8562011-11-13 13:41:49 +00004885 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
tuexen1ee04c82012-04-19 16:35:13 +00004886 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
tuexen98456cf2012-04-19 15:37:07 +00004887
4888 /* check if the amount free in the send socket buffer crossed the threshold */
4889 if (inp->send_callback &&
4890 (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4891 (inp->send_sb_threshold == 0))) {
4892 atomic_add_int(&stcb->asoc.refcnt, 1);
4893 SCTP_TCB_UNLOCK(stcb);
4894 inp->send_callback(stcb->sctp_socket, sb_free_now);
4895 SCTP_TCB_LOCK(stcb);
4896 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4897 }
tuexen749d8562011-11-13 13:41:49 +00004898 }
tuexen98456cf2012-04-19 15:37:07 +00004899 } else if ((wake_him) && (stcb->sctp_socket)) {
tuexen749d8562011-11-13 13:41:49 +00004900#else
tuexendd729232011-11-01 23:04:43 +00004901 /* sa_ignore NO_NULL_CHK */
4902 if ((wake_him) && (stcb->sctp_socket)) {
tuexen98456cf2012-04-19 15:37:07 +00004903#endif
tuexen6bffa9a2012-06-25 17:40:03 +00004904#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004905 struct socket *so;
4906
4907#endif
4908 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4909 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004910 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004911 }
tuexen6bffa9a2012-06-25 17:40:03 +00004912#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004913 so = SCTP_INP_SO(stcb->sctp_ep);
4914 atomic_add_int(&stcb->asoc.refcnt, 1);
4915 SCTP_TCB_UNLOCK(stcb);
4916 SCTP_SOCKET_LOCK(so, 1);
4917 SCTP_TCB_LOCK(stcb);
4918 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4919 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4920 /* assoc was freed while we were unlocked */
4921 SCTP_SOCKET_UNLOCK(so, 1);
4922 return;
4923 }
4924#endif
4925 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
tuexen6bffa9a2012-06-25 17:40:03 +00004926#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004927 SCTP_SOCKET_UNLOCK(so, 1);
4928#endif
4929 } else {
4930 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004931 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004932 }
4933 }
tuexendd729232011-11-01 23:04:43 +00004934
4935 if (asoc->fast_retran_loss_recovery && accum_moved) {
4936 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4937 /* Setup so we will exit RFC2582 fast recovery */
4938 will_exit_fast_recovery = 1;
4939 }
4940 }
4941 /*
4942 * Check for revoked fragments:
4943 *
4944 * if Previous sack - Had no frags then we can't have any revoked if
4945 * Previous sack - Had frag's then - If we now have frags aka
4946 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4947 * some of them. else - The peer revoked all ACKED fragments, since
4948 * we had some before and now we have NONE.
4949 */
4950
4951 if (num_seg) {
4952 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4953 asoc->saw_sack_with_frags = 1;
4954 } else if (asoc->saw_sack_with_frags) {
4955 int cnt_revoked = 0;
4956
4957 /* Peer revoked all dg's marked or acked */
4958 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4959 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4960 tp1->sent = SCTP_DATAGRAM_SENT;
4961 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4962 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4963 tp1->whoTo->flight_size,
4964 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004965 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004966 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004967 }
4968 sctp_flight_size_increase(tp1);
4969 sctp_total_flight_increase(stcb, tp1);
4970 tp1->rec.data.chunk_was_revoked = 1;
4971 /*
4972 * To ensure that this increase in
4973 * flightsize, which is artificial,
4974 * does not throttle the sender, we
4975 * also increase the cwnd
4976 * artificially.
4977 */
4978 tp1->whoTo->cwnd += tp1->book_size;
4979 cnt_revoked++;
4980 }
4981 }
4982 if (cnt_revoked) {
4983 reneged_all = 1;
4984 }
4985 asoc->saw_sack_with_frags = 0;
4986 }
4987 if (num_nr_seg > 0)
4988 asoc->saw_sack_with_nr_frags = 1;
4989 else
4990 asoc->saw_sack_with_nr_frags = 0;
4991
4992 /* JRS - Use the congestion control given in the CC module */
4993 if (ecne_seen == 0) {
4994 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4995 if (net->net_ack2 > 0) {
4996 /*
4997 * Karn's rule applies to clearing error count, this
4998 * is optional.
4999 */
5000 net->error_count = 0;
5001 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
5002 /* addr came good */
5003 net->dest_state |= SCTP_ADDR_REACHABLE;
5004 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
tuexenda53ff02012-05-14 09:00:59 +00005005 0, (void *)net, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00005006 }
5007
5008 if (net == stcb->asoc.primary_destination) {
5009 if (stcb->asoc.alternate) {
5010 /* release the alternate, primary is good */
5011 sctp_free_remote_addr(stcb->asoc.alternate);
5012 stcb->asoc.alternate = NULL;
5013 }
5014 }
5015
5016 if (net->dest_state & SCTP_ADDR_PF) {
5017 net->dest_state &= ~SCTP_ADDR_PF;
t00fcxen0057a6d2015-05-28 16:42:49 +00005018 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5019 stcb->sctp_ep, stcb, net,
Michael Tuexen91565952020-02-03 23:23:28 +01005020 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
tuexendd729232011-11-01 23:04:43 +00005021 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5022 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5023 /* Done with this net */
5024 net->net_ack = 0;
5025 }
5026 /* restore any doubled timers */
5027 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5028 if (net->RTO < stcb->asoc.minrto) {
5029 net->RTO = stcb->asoc.minrto;
5030 }
5031 if (net->RTO > stcb->asoc.maxrto) {
5032 net->RTO = stcb->asoc.maxrto;
5033 }
5034 }
5035 }
5036 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5037 }
5038
5039 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5040 /* nothing left in-flight */
5041 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5042 /* stop all timers */
5043 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
t00fcxen0057a6d2015-05-28 16:42:49 +00005044 stcb, net,
Michael Tuexen91565952020-02-03 23:23:28 +01005045 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
tuexendd729232011-11-01 23:04:43 +00005046 net->flight_size = 0;
5047 net->partial_bytes_acked = 0;
5048 }
5049 asoc->total_flight = 0;
5050 asoc->total_flight_count = 0;
5051 }
5052
5053 /**********************************/
5054 /* Now what about shutdown issues */
5055 /**********************************/
5056 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5057 /* nothing left on sendqueue.. consider done */
5058 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5059 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5060 asoc->peers_rwnd, 0, 0, a_rwnd);
5061 }
5062 asoc->peers_rwnd = a_rwnd;
5063 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5064 /* SWS sender side engages */
5065 asoc->peers_rwnd = 0;
5066 }
5067 /* clean up */
5068 if ((asoc->stream_queue_cnt == 1) &&
5069 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02005070 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005071 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
Michael Tuexen348a36c2018-08-13 16:24:47 +02005072 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
tuexendd729232011-11-01 23:04:43 +00005073 }
Michael Tuexen74842cb2017-07-20 13:15:46 +02005074 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02005075 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexen74842cb2017-07-20 13:15:46 +02005076 (asoc->stream_queue_cnt == 1) &&
5077 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5078 struct mbuf *op_err;
5079
5080 *abort_now = 1;
5081 /* XXX */
5082 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
Michael Tuexen91565952020-02-03 23:23:28 +01005083 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
Michael Tuexen74842cb2017-07-20 13:15:46 +02005084 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5085 return;
5086 }
tuexendd729232011-11-01 23:04:43 +00005087 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5088 (asoc->stream_queue_cnt == 0)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02005089 struct sctp_nets *netp;
t00fcxen08f9ff92014-03-16 13:38:54 +00005090
Michael Tuexen348a36c2018-08-13 16:24:47 +02005091 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5092 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02005093 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
tuexendd729232011-11-01 23:04:43 +00005094 }
Michael Tuexen348a36c2018-08-13 16:24:47 +02005095 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
Michael Tuexen74842cb2017-07-20 13:15:46 +02005096 sctp_stop_timers_for_shutdown(stcb);
5097 if (asoc->alternate) {
5098 netp = asoc->alternate;
5099 } else {
5100 netp = asoc->primary_destination;
5101 }
5102 sctp_send_shutdown(stcb, netp);
5103 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5104 stcb->sctp_ep, stcb, netp);
5105 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
Michael Tuexend07a5f22020-03-19 23:34:46 +01005106 stcb->sctp_ep, stcb, NULL);
tuexendd729232011-11-01 23:04:43 +00005107 return;
Michael Tuexen348a36c2018-08-13 16:24:47 +02005108 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
tuexendd729232011-11-01 23:04:43 +00005109 (asoc->stream_queue_cnt == 0)) {
5110 struct sctp_nets *netp;
t00fcxend0ad16b2013-02-09 18:34:24 +00005111
tuexendd729232011-11-01 23:04:43 +00005112 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
Michael Tuexen348a36c2018-08-13 16:24:47 +02005113 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
tuexendd729232011-11-01 23:04:43 +00005114 sctp_stop_timers_for_shutdown(stcb);
t00fcxend0ad16b2013-02-09 18:34:24 +00005115 if (asoc->alternate) {
5116 netp = asoc->alternate;
5117 } else {
5118 netp = asoc->primary_destination;
5119 }
5120 sctp_send_shutdown_ack(stcb, netp);
tuexendd729232011-11-01 23:04:43 +00005121 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5122 stcb->sctp_ep, stcb, netp);
5123 return;
5124 }
5125 }
5126 /*
5127 * Now here we are going to recycle net_ack for a different use...
5128 * HEADS UP.
5129 */
5130 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5131 net->net_ack = 0;
5132 }
5133
5134 /*
5135 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5136 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5137 * automatically ensure that.
5138 */
5139 if ((asoc->sctp_cmt_on_off > 0) &&
5140 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5141 (cmt_dac_flag == 0)) {
5142 this_sack_lowest_newack = cum_ack;
5143 }
5144 if ((num_seg > 0) || (num_nr_seg > 0)) {
5145 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5146 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5147 }
5148 /* JRS - Use the congestion control given in the CC module */
5149 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5150
5151 /* Now are we exiting loss recovery ? */
5152 if (will_exit_fast_recovery) {
5153 /* Ok, we must exit fast recovery */
5154 asoc->fast_retran_loss_recovery = 0;
5155 }
5156 if ((asoc->sat_t3_loss_recovery) &&
5157 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5158 /* end satellite t3 loss recovery */
5159 asoc->sat_t3_loss_recovery = 0;
5160 }
5161 /*
5162 * CMT Fast recovery
5163 */
5164 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5165 if (net->will_exit_fast_recovery) {
5166 /* Ok, we must exit fast recovery */
5167 net->fast_retran_loss_recovery = 0;
5168 }
5169 }
5170
5171 /* Adjust and set the new rwnd value */
5172 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5173 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5174 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5175 }
5176 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5177 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5178 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5179 /* SWS sender side engages */
5180 asoc->peers_rwnd = 0;
5181 }
5182 if (asoc->peers_rwnd > old_rwnd) {
5183 win_probe_recovery = 1;
5184 }
5185
5186 /*
5187 * Now we must setup so we have a timer up for anyone with
5188 * outstanding data.
5189 */
5190 done_once = 0;
5191again:
5192 j = 0;
5193 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5194 if (win_probe_recovery && (net->window_probe)) {
5195 win_probe_recovered = 1;
5196 /*-
5197 * Find first chunk that was used with
5198 * window probe and clear the event. Put
5199 * it back into the send queue as if has
5200 * not been sent.
5201 */
5202 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5203 if (tp1->window_probe) {
tuexen9784e9a2011-12-18 13:04:23 +00005204 sctp_window_probe_recovery(stcb, asoc, tp1);
tuexendd729232011-11-01 23:04:43 +00005205 break;
5206 }
5207 }
5208 }
5209 if (net->flight_size) {
5210 j++;
5211 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5212 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5213 stcb->sctp_ep, stcb, net);
5214 }
5215 if (net->window_probe) {
5216 net->window_probe = 0;
5217 }
5218 } else {
5219 if (net->window_probe) {
5220 /* In window probes we must assure a timer is still running there */
5221 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5222 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5223 stcb->sctp_ep, stcb, net);
5224
5225 }
5226 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5227 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5228 stcb, net,
Michael Tuexen91565952020-02-03 23:23:28 +01005229 SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
tuexendd729232011-11-01 23:04:43 +00005230 }
5231 }
5232 }
5233 if ((j == 0) &&
5234 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5235 (asoc->sent_queue_retran_cnt == 0) &&
5236 (win_probe_recovered == 0) &&
5237 (done_once == 0)) {
5238 /* huh, this should not happen unless all packets
5239 * are PR-SCTP and marked to skip of course.
5240 */
5241 if (sctp_fs_audit(asoc)) {
5242 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5243 net->flight_size = 0;
5244 }
5245 asoc->total_flight = 0;
5246 asoc->total_flight_count = 0;
5247 asoc->sent_queue_retran_cnt = 0;
5248 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5249 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5250 sctp_flight_size_increase(tp1);
5251 sctp_total_flight_increase(stcb, tp1);
5252 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5253 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5254 }
5255 }
5256 }
5257 done_once = 1;
5258 goto again;
5259 }
5260 /*********************************************/
5261 /* Here we perform PR-SCTP procedures */
5262 /* (section 4.2) */
5263 /*********************************************/
5264 /* C1. update advancedPeerAckPoint */
5265 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5266 asoc->advanced_peer_ack_point = cum_ack;
5267 }
5268 /* C2. try to further move advancedPeerAckPoint ahead */
t00fcxen0e78cef2014-08-02 22:05:33 +00005269 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
tuexendd729232011-11-01 23:04:43 +00005270 struct sctp_tmit_chunk *lchk;
5271 uint32_t old_adv_peer_ack_point;
tuexen15f99d82012-04-19 16:08:38 +00005272
tuexendd729232011-11-01 23:04:43 +00005273 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5274 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5275 /* C3. See if we need to send a Fwd-TSN */
5276 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5277 /*
5278 * ISSUE with ECN, see FWD-TSN processing.
5279 */
5280 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5281 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5282 0xee, cum_ack, asoc->advanced_peer_ack_point,
5283 old_adv_peer_ack_point);
5284 }
5285 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5286 send_forward_tsn(stcb, asoc);
5287 } else if (lchk) {
5288 /* try to FR fwd-tsn's that get lost too */
5289 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5290 send_forward_tsn(stcb, asoc);
5291 }
5292 }
5293 }
Michael Tuexena8f3d9d2020-05-10 19:35:08 +02005294 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5295 if (lchk->whoTo != NULL) {
5296 break;
5297 }
5298 }
5299 if (lchk != NULL) {
tuexendd729232011-11-01 23:04:43 +00005300 /* Assure a timer is up */
5301 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5302 stcb->sctp_ep, stcb, lchk->whoTo);
5303 }
5304 }
5305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5306 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5307 a_rwnd,
5308 stcb->asoc.peers_rwnd,
5309 stcb->asoc.total_flight,
5310 stcb->asoc.total_output_queue_size);
5311 }
5312}
5313
5314void
tuexen9784e9a2011-12-18 13:04:23 +00005315sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
tuexendd729232011-11-01 23:04:43 +00005316{
5317 /* Copy cum-ack */
5318 uint32_t cum_ack, a_rwnd;
5319
5320 cum_ack = ntohl(cp->cumulative_tsn_ack);
5321 /* Arrange so a_rwnd does NOT change */
5322 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5323
5324 /* Now call the express sack handling */
5325 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5326}
5327
/*
 * Kick the re-order queue of one inbound stream after a FORWARD-TSN has
 * advanced the cumulative point past abandoned (PR-SCTP) data.
 *
 * Phase 1: flush every queued message whose message-id is at or before the
 * stream's last_mid_delivered to the socket read queue.
 * Phase 2: continue normal in-order delivery for messages that have now
 * become ready (message-id == last_mid_delivered + 1, repeatedly).
 *
 * All read-queue/reassembly helpers are invoked with SCTP_READ_LOCK_HELD,
 * so the caller must already hold the INP read lock.  NOTE(review):
 * presumably also called with the association (TCB) lock held — confirm
 * at the call sites.
 */
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *control, *ncontrol;
	struct sctp_association *asoc;
	uint32_t mid;		/* message-id cursor (SSN when I-DATA is off) */
	int need_reasm_check = 0;

	asoc = &stcb->asoc;
	mid = strmin->last_mid_delivered;
	/*
	 * First deliver anything prior to and including the stream no that
	 * came in.
	 */
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
			/* this is deliverable now */
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* Complete (unfragmented) message: unlink it
				 * from whichever stream queue holds it. */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						      strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					/* Clamp instead of underflowing in production. */
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					                  control,
					                  &stcb->sctp_socket->so_rcv,
					                  1, SCTP_READ_LOCK_HELD,
					                  SCTP_SO_NOT_LOCKED);
				}
			} else {
				/* Its a fragmented message */
				if (control->first_frag_seen) {
					/* Make it so this is next to deliver, we restore later */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			/* no more delivery now. */
			break;
		}
	}
	if (need_reasm_check) {
		int ret;

		/*
		 * Try to finish delivery of the partial message at the head;
		 * last_mid_delivered was temporarily rewound just above so
		 * the reassembly check treats it as next in line.
		 */
		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
			/* Restore the next to deliver unless we are ahead */
			strmin->last_mid_delivered = mid;
		}
		if (ret == 0) {
			/* Left the front Partial one on */
			return;
		}
		need_reasm_check = 0;
	}
	/*
	 * now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						      strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					/* Clamp instead of underflowing in production. */
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					                  control,
					                  &stcb->sctp_socket->so_rcv, 1,
					                  SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);

				}
				/* Advance to the next expected message-id. */
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* Its a fragmented message */
				if (control->first_frag_seen) {
					/* Make it so this is next to deliver */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			/* Gap in message-ids: nothing more is ready. */
			break;
		}
	}
	if (need_reasm_check) {
		/* Final attempt to deliver the partial message at the head. */
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}
5468
Michael Tuexene64d7732016-07-17 15:21:06 +02005469
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005470
/*
 * Flush the reassembly state for one message, identified by (stream, mid,
 * ordered), that the peer has abandoned (FORWARD-TSN processing).
 *
 * For pre-I-DATA unordered traffic the message-id is not meaningful, so
 * only fragments with TSN <= cumtsn are purged; if newer fragments remain
 * the control block is reset and kept.  Otherwise the whole entry is
 * removed from its stream queue and, if it is not already sitting on the
 * socket read queue, freed.
 *
 * Invoked with SCTP_READ_LOCK_HELD semantics (see the reassembly helpers
 * called below), so the caller must hold the INP read lock.
 */
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
{
	struct sctp_queued_to_read *control;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	int cnt_removed=0;	/* purged-fragment count; not otherwise used here */

	/*
	 * For now large messages held on the stream reasm that are
	 * complete will be tossed too. We could in theory do more
	 * work to spin through and stop after dumping one msg aka
	 * seeing the start of a new msg at the head, and call the
	 * delivery function... to see if it can be delivered... But
	 * for now we just dump everything on the queue.
	 */
	strm = &asoc->strmin[stream];
	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
	if (control == NULL) {
		/* Not found */
		return;
	}
	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		/*
		 * Legacy unordered entry whose fragments are all beyond the
		 * new cumulative TSN: nothing here was abandoned.
		 */
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && (ordered == 0)) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				/* Legacy unordered: keep fragments past cumtsn. */
				break;
			}
		}
		cnt_removed++;
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		/* Keep the reassembly-queue byte accounting in sync. */
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			/* Clamp instead of underflowing in production. */
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		/*
		 * Reset the control block and, if the surviving head fragment
		 * is a FIRST_FRAG, re-seed the control with it before trying
		 * delivery again.
		 */
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	/* All fragments gone: unlink the entry from its stream queue. */
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			/* Clamp instead of underflowing in production. */
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		/* Not visible to the application: release it entirely. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}
5571
tuexendd729232011-11-01 23:04:43 +00005572void
5573sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5574 struct sctp_forward_tsn_chunk *fwd,
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005575 int *abort_flag, struct mbuf *m , int offset)
tuexendd729232011-11-01 23:04:43 +00005576{
5577 /* The pr-sctp fwd tsn */
5578 /*
5579 * here we will perform all the data receiver side steps for
5580 * processing FwdTSN, as required in by pr-sctp draft:
5581 *
5582 * Assume we get FwdTSN(x):
5583 *
Michael Tuexend3331282020-02-03 23:14:00 +01005584 * 1) update local cumTSN to x
5585 * 2) try to further advance cumTSN to x + others we have
5586 * 3) examine and update re-ordering queue on pr-in-streams
5587 * 4) clean up re-assembly queue
Michael Tuexene64d7732016-07-17 15:21:06 +02005588 * 5) Send a sack to report where we are.
tuexendd729232011-11-01 23:04:43 +00005589 */
5590 struct sctp_association *asoc;
5591 uint32_t new_cum_tsn, gap;
tuexen9784e9a2011-12-18 13:04:23 +00005592 unsigned int i, fwd_sz, m_size;
tuexendd729232011-11-01 23:04:43 +00005593 uint32_t str_seq;
5594 struct sctp_stream_in *strm;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005595 struct sctp_queued_to_read *control, *sv;
tuexendd729232011-11-01 23:04:43 +00005596
tuexendd729232011-11-01 23:04:43 +00005597 asoc = &stcb->asoc;
5598 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5599 SCTPDBG(SCTP_DEBUG_INDATA1,
5600 "Bad size too small/big fwd-tsn\n");
5601 return;
5602 }
5603 m_size = (stcb->asoc.mapping_array_size << 3);
5604 /*************************************************************/
5605 /* 1. Here we update local cumTSN and shift the bitmap array */
5606 /*************************************************************/
5607 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5608
5609 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5610 /* Already got there ... */
5611 return;
5612 }
5613 /*
5614 * now we know the new TSN is more advanced, let's find the actual
5615 * gap
5616 */
5617 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5618 asoc->cumulative_tsn = new_cum_tsn;
5619 if (gap >= m_size) {
5620 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
t00fcxen08f9ff92014-03-16 13:38:54 +00005621 struct mbuf *op_err;
5622 char msg[SCTP_DIAG_INFO_LEN];
5623
tuexendd729232011-11-01 23:04:43 +00005624 /*
5625 * out of range (of single byte chunks in the rwnd I
5626 * give out). This must be an attacker.
5627 */
5628 *abort_flag = 1;
Michael Tuexenedd369d2020-05-19 09:42:15 +02005629 SCTP_SNPRINTF(msg, sizeof(msg),
5630 "New cum ack %8.8x too high, highest TSN %8.8x",
5631 new_cum_tsn, asoc->highest_tsn_inside_map);
t00fcxen08f9ff92014-03-16 13:38:54 +00005632 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen91565952020-02-03 23:23:28 +01005633 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
t00fcxen08f9ff92014-03-16 13:38:54 +00005634 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00005635 return;
5636 }
5637 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
tuexen15f99d82012-04-19 16:08:38 +00005638
tuexendd729232011-11-01 23:04:43 +00005639 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5640 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5641 asoc->highest_tsn_inside_map = new_cum_tsn;
tuexen15f99d82012-04-19 16:08:38 +00005642
tuexendd729232011-11-01 23:04:43 +00005643 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5644 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
tuexen15f99d82012-04-19 16:08:38 +00005645
tuexendd729232011-11-01 23:04:43 +00005646 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5647 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5648 }
5649 } else {
5650 SCTP_TCB_LOCK_ASSERT(stcb);
5651 for (i = 0; i <= gap; i++) {
5652 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5653 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5654 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5655 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5656 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5657 }
5658 }
5659 }
5660 }
5661 /*************************************************************/
5662 /* 2. Clear up re-assembly queue */
5663 /*************************************************************/
tuexendd729232011-11-01 23:04:43 +00005664
Michael Tuexene5001952016-04-17 19:25:27 +02005665 /* This is now done as part of clearing up the stream/seq */
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005666 if (asoc->idata_supported == 0) {
5667 uint16_t sid;
5668 /* Flush all the un-ordered data based on cum-tsn */
5669 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5670 for (sid = 0 ; sid < asoc->streamincnt; sid++) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01005671 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005672 }
5673 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5674 }
tuexendd729232011-11-01 23:04:43 +00005675 /*******************************************************/
5676 /* 3. Update the PR-stream re-ordering queues and fix */
5677 /* delivery issues as needed. */
5678 /*******************************************************/
5679 fwd_sz -= sizeof(*fwd);
5680 if (m && fwd_sz) {
5681 /* New method. */
5682 unsigned int num_str;
Michael Tuexen00657ac2016-12-07 21:53:26 +01005683 uint32_t mid, cur_mid;
5684 uint16_t sid;
Michael Tuexene64d7732016-07-17 15:21:06 +02005685 uint16_t ordered, flags;
tuexendd729232011-11-01 23:04:43 +00005686 struct sctp_strseq *stseq, strseqbuf;
Michael Tuexene5001952016-04-17 19:25:27 +02005687 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
tuexendd729232011-11-01 23:04:43 +00005688 offset += sizeof(*fwd);
5689
5690 SCTP_INP_READ_LOCK(stcb->sctp_ep);
Michael Tuexene5001952016-04-17 19:25:27 +02005691 if (asoc->idata_supported) {
5692 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
Michael Tuexene5001952016-04-17 19:25:27 +02005693 } else {
5694 num_str = fwd_sz / sizeof(struct sctp_strseq);
Michael Tuexene5001952016-04-17 19:25:27 +02005695 }
tuexendd729232011-11-01 23:04:43 +00005696 for (i = 0; i < num_str; i++) {
Michael Tuexene5001952016-04-17 19:25:27 +02005697 if (asoc->idata_supported) {
5698 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5699 sizeof(struct sctp_strseq_mid),
5700 (uint8_t *)&strseqbuf_m);
5701 offset += sizeof(struct sctp_strseq_mid);
5702 if (stseq_m == NULL) {
5703 break;
5704 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005705 sid = ntohs(stseq_m->sid);
5706 mid = ntohl(stseq_m->mid);
Michael Tuexene64d7732016-07-17 15:21:06 +02005707 flags = ntohs(stseq_m->flags);
5708 if (flags & PR_SCTP_UNORDERED_FLAG) {
5709 ordered = 0;
5710 } else {
5711 ordered = 1;
5712 }
Michael Tuexene5001952016-04-17 19:25:27 +02005713 } else {
5714 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5715 sizeof(struct sctp_strseq),
5716 (uint8_t *)&strseqbuf);
5717 offset += sizeof(struct sctp_strseq);
5718 if (stseq == NULL) {
5719 break;
5720 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005721 sid = ntohs(stseq->sid);
5722 mid = (uint32_t)ntohs(stseq->ssn);
Michael Tuexene64d7732016-07-17 15:21:06 +02005723 ordered = 1;
tuexendd729232011-11-01 23:04:43 +00005724 }
5725 /* Convert */
tuexendd729232011-11-01 23:04:43 +00005726
5727 /* now process */
5728
5729 /*
5730 * Ok we now look for the stream/seq on the read queue
5731 * where its not all delivered. If we find it we transmute the
5732 * read entry into a PDI_ABORTED.
5733 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01005734 if (sid >= asoc->streamincnt) {
tuexendd729232011-11-01 23:04:43 +00005735 /* screwed up streams, stop! */
5736 break;
5737 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005738 if ((asoc->str_of_pdapi == sid) &&
5739 (asoc->ssn_of_pdapi == mid)) {
tuexendd729232011-11-01 23:04:43 +00005740 /* If this is the one we were partially delivering
5741 * now then we no longer are. Note this will change
5742 * with the reassembly re-write.
5743 */
5744 asoc->fragmented_delivery_inprogress = 0;
5745 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005746 strm = &asoc->strmin[sid];
5747 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5748 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005749 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005750 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5751 if ((control->sinfo_stream == sid) &&
5752 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01005753 str_seq = (sid << 16) | (0x0000ffff & mid);
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005754 control->pdapi_aborted = 1;
tuexendd729232011-11-01 23:04:43 +00005755 sv = stcb->asoc.control_pdapi;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005756 control->end_added = 1;
5757 if (control->on_strm_q == SCTP_ON_ORDERED) {
5758 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5759 if (asoc->size_on_all_streams >= control->length) {
5760 asoc->size_on_all_streams -= control->length;
5761 } else {
Michael Tuexenf6d20c52016-04-18 11:31:05 +02005762#ifdef INVARIANTS
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005763 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5764#else
5765 asoc->size_on_all_streams = 0;
5766#endif
5767 }
5768 sctp_ucount_decr(asoc->cnt_on_all_streams);
5769 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5770 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5771#ifdef INVARIANTS
5772 } else if (control->on_strm_q) {
Michael Tuexeneccb4be2016-04-18 08:58:59 +02005773 panic("strm: %p ctl: %p unknown %d",
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005774 strm, control, control->on_strm_q);
Michael Tuexenf6d20c52016-04-18 11:31:05 +02005775#endif
Michael Tuexene5001952016-04-17 19:25:27 +02005776 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005777 control->on_strm_q = 0;
5778 stcb->asoc.control_pdapi = control;
tuexendd729232011-11-01 23:04:43 +00005779 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5780 stcb,
5781 SCTP_PARTIAL_DELIVERY_ABORTED,
5782 (void *)&str_seq,
5783 SCTP_SO_NOT_LOCKED);
5784 stcb->asoc.control_pdapi = sv;
5785 break;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005786 } else if ((control->sinfo_stream == sid) &&
5787 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
tuexendd729232011-11-01 23:04:43 +00005788 /* We are past our victim SSN */
5789 break;
5790 }
5791 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005792 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
tuexendd729232011-11-01 23:04:43 +00005793 /* Update the sequence number */
Michael Tuexen00657ac2016-12-07 21:53:26 +01005794 strm->last_mid_delivered = mid;
tuexendd729232011-11-01 23:04:43 +00005795 }
5796 /* now kick the stream the new way */
Michael Tuexene5001952016-04-17 19:25:27 +02005797 /*sa_ignore NO_NULL_CHK*/
tuexendd729232011-11-01 23:04:43 +00005798 sctp_kick_prsctp_reorder_queue(stcb, strm);
5799 }
5800 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5801 }
5802 /*
5803 * Now slide thing forward.
5804 */
5805 sctp_slide_mapping_arrays(stcb);
tuexendd729232011-11-01 23:04:43 +00005806}