blob: cc8aed34109d3bf71011fad1803ac35136d79e3d [file] [log] [blame]
tuexendd729232011-11-01 23:04:43 +00001/*-
Michael Tuexen866a7312017-11-24 12:44:05 +01002 * SPDX-License-Identifier: BSD-3-Clause
3 *
tuexendd729232011-11-01 23:04:43 +00004 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
tuexen194eae12012-05-23 12:03:48 +00005 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
tuexendd729232011-11-01 23:04:43 +00007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
tuexen9784e9a2011-12-18 13:04:23 +000012 * this list of conditions and the following disclaimer.
tuexendd729232011-11-01 23:04:43 +000013 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
tuexen9784e9a2011-12-18 13:04:23 +000016 * the documentation and/or other materials provided with the distribution.
tuexendd729232011-11-01 23:04:43 +000017 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
Michael Tuexen5be0c252020-06-13 00:53:56 +020035#if defined(__FreeBSD__) && !defined(__Userspace__)
tuexendd729232011-11-01 23:04:43 +000036#include <sys/cdefs.h>
Michael Tuexenb882f262021-01-31 10:07:47 +010037__FBSDID("$FreeBSD$");
tuexendd729232011-11-01 23:04:43 +000038#endif
39
40#include <netinet/sctp_os.h>
Michael Tuexen5be0c252020-06-13 00:53:56 +020041#if defined(__FreeBSD__) && !defined(__Userspace__)
Michael Tuexene5001952016-04-17 19:25:27 +020042#include <sys/proc.h>
43#endif
tuexendd729232011-11-01 23:04:43 +000044#include <netinet/sctp_var.h>
45#include <netinet/sctp_sysctl.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020046#include <netinet/sctp_header.h>
Michael Tuexene5001952016-04-17 19:25:27 +020047#include <netinet/sctp_pcb.h>
tuexendd729232011-11-01 23:04:43 +000048#include <netinet/sctputil.h>
49#include <netinet/sctp_output.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020050#include <netinet/sctp_uio.h>
Michael Tuexene5001952016-04-17 19:25:27 +020051#include <netinet/sctp_auth.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020052#include <netinet/sctp_timer.h>
Michael Tuexene5001952016-04-17 19:25:27 +020053#include <netinet/sctp_asconf.h>
54#include <netinet/sctp_indata.h>
55#include <netinet/sctp_bsd_addr.h>
56#include <netinet/sctp_input.h>
57#include <netinet/sctp_crc32.h>
Michael Tuexen5be0c252020-06-13 00:53:56 +020058#if defined(__FreeBSD__) && !defined(__Userspace__)
Michael Tuexene5001952016-04-17 19:25:27 +020059#include <netinet/sctp_lock_bsd.h>
60#endif
tuexendd729232011-11-01 23:04:43 +000061/*
62 * NOTES: On the outbound side of things I need to check the sack timer to
63 * see if I should generate a sack into the chunk queue (if I have data to
64 * send that is and will be sending it .. for bundling.
65 *
66 * The callback in sctp_usrreq.c will get called when the socket is read from.
67 * This will cause sctp_service_queues() to get called on the top entry in
68 * the list.
69 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +020070static uint32_t
Michael Tuexene5001952016-04-17 19:25:27 +020071sctp_add_chk_to_control(struct sctp_queued_to_read *control,
72 struct sctp_stream_in *strm,
73 struct sctp_tcb *stcb,
74 struct sctp_association *asoc,
Michael Tuexeneddbc5b2020-06-14 11:56:24 +020075 struct sctp_tmit_chunk *chk, int hold_rlock);
Michael Tuexene5001952016-04-17 19:25:27 +020076
tuexendd729232011-11-01 23:04:43 +000077void
78sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79{
80 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
81}
82
/*
 * Calculate what the rwnd would be: the socket receive-buffer space that
 * is still available to the peer, after discounting data we already hold
 * on the reassembly and per-stream queues (plus per-entry mbuf overhead)
 * and the control-message overhead tracked in my_rwnd_control_len.
 * Returns 0 only when we are genuinely out of space; otherwise at least 1
 * (Silly Window Syndrome avoidance, see below).
 */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		/* No socket: report a zero window. */
		return (calc);
	}

	/* Counts and byte totals must stay consistent: a zero count implies a zero size. */
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	        ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Nothing buffered anywhere: full rwnd granted. */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* Get the actual free space in the socket receive buffer. */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and we yet hold
	 * for putting up. Each queued entry is also charged MSIZE of mbuf
	 * overhead. sctp_sbspace_sub() clamps at zero rather than wrapping.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	                                         asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	                                         asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* Out of space. */
		return (calc);
	}

	/* What is the overhead of all these rwnd's (control data held for the app)? */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it
	 * to 1, even if it is 0. SWS (Silly Window Syndrome) avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
135
tuexendd729232011-11-01 23:04:43 +0000136/*
137 * Build out our readq entry based on the incoming packet.
138 */
139struct sctp_queued_to_read *
140sctp_build_readq_entry(struct sctp_tcb *stcb,
141 struct sctp_nets *net,
142 uint32_t tsn, uint32_t ppid,
Michael Tuexen00657ac2016-12-07 21:53:26 +0100143 uint32_t context, uint16_t sid,
144 uint32_t mid, uint8_t flags,
tuexendd729232011-11-01 23:04:43 +0000145 struct mbuf *dm)
146{
147 struct sctp_queued_to_read *read_queue_e = NULL;
148
149 sctp_alloc_a_readq(stcb, read_queue_e);
150 if (read_queue_e == NULL) {
151 goto failed_build;
152 }
Michael Tuexene5001952016-04-17 19:25:27 +0200153 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
Michael Tuexen00657ac2016-12-07 21:53:26 +0100154 read_queue_e->sinfo_stream = sid;
tuexendd729232011-11-01 23:04:43 +0000155 read_queue_e->sinfo_flags = (flags << 8);
156 read_queue_e->sinfo_ppid = ppid;
tuexen9784e9a2011-12-18 13:04:23 +0000157 read_queue_e->sinfo_context = context;
tuexendd729232011-11-01 23:04:43 +0000158 read_queue_e->sinfo_tsn = tsn;
159 read_queue_e->sinfo_cumtsn = tsn;
160 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
Michael Tuexen00657ac2016-12-07 21:53:26 +0100161 read_queue_e->mid = mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200162 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
163 TAILQ_INIT(&read_queue_e->reasm);
tuexendd729232011-11-01 23:04:43 +0000164 read_queue_e->whoFrom = net;
tuexendd729232011-11-01 23:04:43 +0000165 atomic_add_int(&net->ref_count, 1);
166 read_queue_e->data = dm;
tuexendd729232011-11-01 23:04:43 +0000167 read_queue_e->stcb = stcb;
168 read_queue_e->port_from = stcb->rport;
Michael Tuexend98d2c42020-05-18 14:09:04 +0200169 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
170 read_queue_e->do_not_ref_stcb = 1;
171 }
tuexendd729232011-11-01 23:04:43 +0000172failed_build:
173 return (read_queue_e);
174}
175
/*
 * Build the ancillary-data (control) mbuf for a received message, based on
 * which per-endpoint features are enabled: SCTP_RCVINFO, SCTP_NXTINFO and/or
 * the (extended) SCTP_SNDRCV cmsg. Returns NULL when the user wants no
 * ancillary data or no mbuf could be allocated. The cmsgs are packed
 * back-to-back; 'cmh' is walked forward by CMSG_SPACE() after each one.
 */
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(_WIN32)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	/* First pass: compute the total space needed for all requested cmsgs. */
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		/* Info about the next message is available: emit SCTP_NXTINFO. */
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(_WIN32)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	/*
	 * Make sure that there is no un-initialized padding between
	 * the cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		/* Advance to where the next cmsg (if any) starts. */
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		/* Translate the serinfo next-message flags into nxtinfo flags. */
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
305
/*
 * Move a TSN from the (revokable) mapping_array to the non-revokable
 * nr_mapping_array, once the corresponding data has been delivered and can
 * no longer be renege'd. Only relevant when sctp_do_drain is enabled
 * (draining may revoke TSNs that are still in the revokable map).
 */
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i;
	int in_r, in_nr;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		/* No draining: nothing is ever revoked, so no need to track this. */
		return;
	}
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't
		 * need to worry about it being moved from one to the other.
		 */
		return;
	}
	/* Translate the TSN into a bit offset in the mapping arrays. */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	/* The TSN must be recorded in at least one of the two maps. */
	KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
	if (!in_nr) {
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
	}
	if (in_r) {
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
		if (tsn == asoc->highest_tsn_inside_map) {
			/* We must back down to see what the new highest is. */
			for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
				SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
					asoc->highest_tsn_inside_map = i;
					break;
				}
			}
			if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
				/* No bits left set: the map is empty below the base. */
				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
			}
		}
	}
}
349
/*
 * Insert 'control' into the correct per-stream inbound queue (ordered or
 * unordered), keeping the queue sorted by MID (with serial-number wrap
 * handled by SCTP_MID_GT/SCTP_MID_EQ when I-DATA is in use).
 * Returns 0 on success, -1 on a protocol violation (a second unordered
 * entry in pre-I-DATA mode, or a duplicate MID) — the caller aborts.
 */
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one stream can be here in old style -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		/* Unfragmented message: it is complete as it stands. */
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		/* Walk the queue to find the sorted insertion point by MID. */
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the
				 * new one, insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, He sent me a duplicate msg
				 * id number?? return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert
					 * it after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
432
/*
 * Abort the association because of an unrecoverable reassembly problem
 * with 'chk' (duplicate/overlapping fragment, FSN inconsistency, ...).
 * Builds a PROTOCOL_VIOLATION cause carrying the diagnostic location
 * 'opspot' and the chunk's identifiers, frees the chunk, and sets
 * *abort_flag so the caller stops processing this packet.
 */
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	/* The diagnostic format differs: I-DATA carries 32-bit FSN/MID,
	 * old DATA carries 16-bit SSN. */
	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn,
		              (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	/* Release the offending chunk before tearing the association down. */
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}
468
469static void
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200470sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
Michael Tuexene5001952016-04-17 19:25:27 +0200471{
Michael Tuexend3331282020-02-03 23:14:00 +0100472 /*
Michael Tuexene5001952016-04-17 19:25:27 +0200473 * The control could not be placed and must be cleaned.
474 */
475 struct sctp_tmit_chunk *chk, *nchk;
476 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
477 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
478 if (chk->data)
479 sctp_m_freem(chk->data);
480 chk->data = NULL;
481 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
482 }
Michael Tuexenb07df882019-09-24 15:21:23 +0200483 sctp_free_remote_addr(control->whoFrom);
484 if (control->data) {
485 sctp_m_freem(control->data);
486 control->data = NULL;
487 }
488 sctp_free_a_readq(stcb, control);
tuexendd729232011-11-01 23:04:43 +0000489}
490
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the control's entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		        strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			              strm->last_mid_delivered, control->sinfo_tsn,
			              control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			              (uint16_t)strm->last_mid_delivered,
			              control->sinfo_tsn,
			              control->sinfo_stream,
			              (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	/* Provisionally account the message on the stream queues; the charge
	 * is reversed below if it turns out to be immediately deliverable. */
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Apple needs the socket lock; drop the tcb lock first to keep
		 * the lock order, holding a refcount so the tcb cannot go away. */
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		/* Now sweep the ordered queue and deliver any further messages
		 * that have become in-order and are complete (not fragmented). */
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					      control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				/* Next in order but fragmented: reassembly must run. */
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			              "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
665
Michael Tuexene5001952016-04-17 19:25:27 +0200666static void
667sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
668{
669 struct mbuf *m, *prev = NULL;
670 struct sctp_tcb *stcb;
671
672 stcb = control->stcb;
673 control->held_length = 0;
674 control->length = 0;
675 m = control->data;
676 while (m) {
677 if (SCTP_BUF_LEN(m) == 0) {
678 /* Skip mbufs with NO length */
679 if (prev == NULL) {
680 /* First one */
681 control->data = sctp_m_free(m);
682 m = control->data;
683 } else {
684 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
685 m = SCTP_BUF_NEXT(prev);
686 }
687 if (m == NULL) {
688 control->tail_mbuf = prev;
689 }
690 continue;
Michael Tuexen3121b802016-04-10 23:28:19 +0200691 }
Michael Tuexene5001952016-04-17 19:25:27 +0200692 prev = m;
693 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
694 if (control->on_read_q) {
695 /*
696 * On read queue so we must increment the
697 * SB stuff, we assume caller has done any locks of SB.
698 */
699 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
Michael Tuexen3121b802016-04-10 23:28:19 +0200700 }
Michael Tuexene5001952016-04-17 19:25:27 +0200701 m = SCTP_BUF_NEXT(m);
Michael Tuexen3121b802016-04-10 23:28:19 +0200702 }
Michael Tuexene5001952016-04-17 19:25:27 +0200703 if (prev) {
704 control->tail_mbuf = prev;
705 }
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200706}
707
708static void
Michael Tuexenbe5e3e72017-07-19 14:44:48 +0200709sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200710{
Michael Tuexene5001952016-04-17 19:25:27 +0200711 struct mbuf *prev=NULL;
712 struct sctp_tcb *stcb;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200713
Michael Tuexene5001952016-04-17 19:25:27 +0200714 stcb = control->stcb;
715 if (stcb == NULL) {
Michael Tuexenf6d20c52016-04-18 11:31:05 +0200716#ifdef INVARIANTS
Michael Tuexene5001952016-04-17 19:25:27 +0200717 panic("Control broken");
Michael Tuexenf6d20c52016-04-18 11:31:05 +0200718#else
719 return;
720#endif
Michael Tuexene5001952016-04-17 19:25:27 +0200721 }
722 if (control->tail_mbuf == NULL) {
723 /* TSNH */
Michael Tuexenc38740e2019-10-06 10:52:55 +0200724 sctp_m_freem(control->data);
Michael Tuexene5001952016-04-17 19:25:27 +0200725 control->data = m;
726 sctp_setup_tail_pointer(control);
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200727 return;
728 }
Michael Tuexene5001952016-04-17 19:25:27 +0200729 control->tail_mbuf->m_next = m;
730 while (m) {
731 if (SCTP_BUF_LEN(m) == 0) {
732 /* Skip mbufs with NO length */
733 if (prev == NULL) {
734 /* First one */
735 control->tail_mbuf->m_next = sctp_m_free(m);
736 m = control->tail_mbuf->m_next;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200737 } else {
Michael Tuexene5001952016-04-17 19:25:27 +0200738 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
739 m = SCTP_BUF_NEXT(prev);
Michael Tuexen3121b802016-04-10 23:28:19 +0200740 }
Michael Tuexene5001952016-04-17 19:25:27 +0200741 if (m == NULL) {
742 control->tail_mbuf = prev;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200743 }
Michael Tuexene5001952016-04-17 19:25:27 +0200744 continue;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200745 }
Michael Tuexene5001952016-04-17 19:25:27 +0200746 prev = m;
747 if (control->on_read_q) {
748 /*
749 * On read queue so we must increment the
750 * SB stuff, we assume caller has done any locks of SB.
Michael Tuexen3121b802016-04-10 23:28:19 +0200751 */
Michael Tuexene5001952016-04-17 19:25:27 +0200752 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
753 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +0200754 *added += SCTP_BUF_LEN(m);
Michael Tuexene5001952016-04-17 19:25:27 +0200755 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
756 m = SCTP_BUF_NEXT(m);
757 }
758 if (prev) {
759 control->tail_mbuf = prev;
760 }
761}
762
Michael Tuexend3331282020-02-03 23:14:00 +0100763static void
Michael Tuexene5001952016-04-17 19:25:27 +0200764sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
765{
766 memset(nc, 0, sizeof(struct sctp_queued_to_read));
767 nc->sinfo_stream = control->sinfo_stream;
Michael Tuexen00657ac2016-12-07 21:53:26 +0100768 nc->mid = control->mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200769 TAILQ_INIT(&nc->reasm);
770 nc->top_fsn = control->top_fsn;
Michael Tuexen00657ac2016-12-07 21:53:26 +0100771 nc->mid = control->mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200772 nc->sinfo_flags = control->sinfo_flags;
773 nc->sinfo_ppid = control->sinfo_ppid;
774 nc->sinfo_context = control->sinfo_context;
775 nc->fsn_included = 0xffffffff;
776 nc->sinfo_tsn = control->sinfo_tsn;
777 nc->sinfo_cumtsn = control->sinfo_cumtsn;
778 nc->sinfo_assoc_id = control->sinfo_assoc_id;
779 nc->whoFrom = control->whoFrom;
780 atomic_add_int(&nc->whoFrom->ref_count, 1);
781 nc->stcb = control->stcb;
782 nc->port_from = control->port_from;
Michael Tuexend98d2c42020-05-18 14:09:04 +0200783 nc->do_not_ref_stcb = control->do_not_ref_stcb;
Michael Tuexene5001952016-04-17 19:25:27 +0200784}
785
Michael Tuexend3331282020-02-03 23:14:00 +0100786static void
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200787sctp_reset_a_control(struct sctp_queued_to_read *control,
788 struct sctp_inpcb *inp, uint32_t tsn)
789{
790 control->fsn_included = tsn;
791 if (control->on_read_q) {
Michael Tuexend3331282020-02-03 23:14:00 +0100792 /*
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200793 * We have to purge it from there,
794 * hopefully this will work :-)
795 */
796 TAILQ_REMOVE(&inp->read_queue, control, next);
797 control->on_read_q = 0;
798 }
799}
800
/*
 * Reassembly for unordered DATA chunks in the old (pre I-DATA) format:
 * every fragment lands on MID 0, so fragments can only be collapsed onto
 * 'control' by consecutive FSN starting at control->fsn_included + 1.
 * Returns 1 when the caller should not look at further unordered controls
 * on this stream, 0 when a partial delivery was just started for 'control'.
 */
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/* Special handling for the old un-ordered data chunk.
	 * All the chunks/TSN's go to mid 0. So
	 * we have to do the old style watching to see
	 * if we have it all. If you return one, no other
	 * control entries on the un-ordered queue will
	 * be looked at. In theory there should be no others
	 * entries in reality, unless the guy is sending both
	 * unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			/*
			 * Pre-allocate a spare readq entry; it is only
			 * consumed if leftover fragments must be moved to
			 * a new control after this message completes.
			 */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left on
					 * the control queue to a new control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						/*
						 * The leftovers start with a FIRST
						 * fragment: seed the new control's
						 * data with it right away.
						 */
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Now lets add it to the queue after removing control */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					/* Message completed: close out the partial-delivery state. */
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						/* Spare entry was never queued; release it. */
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				/* Message not complete yet; the spare entry is unused. */
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		/* Enough buffered to start a partial delivery to the reader. */
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
946
/*
 * Insert a fragment ('chk') of an old-format unordered message into
 * 'control', keeping control->reasm sorted by FSN (for the old format the
 * FSN is the TSN). A FIRST fragment may displace the currently included
 * first when it carries a lower FSN; impossible situations (duplicate FSN,
 * or a conflict with an already started partial delivery) abort the
 * association via sctp_abort_in_reasm() and set *abort_flag.
 */
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;
	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		        "chunk is a first fsn: %u becomes fsn_included\n",
		        chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is
			 * a smaller TSN than this one, even though
			 * this has a first, it must be from a subsequent
			 * msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassembly on
			 * one control multiple messages. As long
			 * as the next FIRST is greater then the old
			 * first (TSN i.e. FSN wise)
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/* Easy way the start of a new guy beyond the lowest */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does
				 * we started the pd-api on the higher TSN (since
				 * the equals part is a TSN failure it must be that).
				 *
				 * We are completely hosed in that case since I have
				 * no way to recover. This really will only happen
				 * if we can get more TSN's higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got
			 * is smaller than the one we previously placed.. yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			/* The displaced old first now goes into the sorted list. */
			goto place_chunk;
		}
		/* First fragment ever seen: it seeds the control's data directly. */
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	/* Sorted insert of 'chk' into the reassembly list by FSN. */
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one, insert
			 * the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This
			 * really should not happen since the FSN is
			 * a TSN and it should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
1079
/*
 * Scan the unordered and ordered reassembly queues of 'strm' and move every
 * message that has become deliverable onto the endpoint's read queue,
 * possibly starting a partial delivery (PD-API) when a message exceeds
 * 'pd_point'. Returns the number of completed ordered messages delivered
 * (0 whenever a partial delivery is in progress and blocks further work).
 */
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of
	 * the SSN's on it that are fragmented
	 * are ready to deliver. If so go ahead
	 * and place them on the read queue. In
	 * so placing if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	/* PD-API threshold: a fraction of the receive buffer, capped by the endpoint setting. */
	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	/* Pass 1: unordered (I-DATA) controls. */
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	/* Pass 2: ordered controls, starting at the head of the in-queue. */
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/* Ok the guy at the top was being partially delivered
		 * completed, so we remove it. Note
		 * the pd_api flag was taken off when the
		 * chunk was merged on in sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more must have gotten an un-ordered above being partially delivered. */
		return (0);
	}
deliver_more:
	/* Deliver consecutive in-order MIDs as long as they are complete enough. */
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* A singleton now slipping through - mark it non-revokable too */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/* Check if we can defer adding until its all there */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/* Don't need it or cannot add more (one being delivered that way) */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					/* Incomplete message: starting a partial delivery for it. */
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				/* Fully delivered: try the next MID in sequence. */
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
1302
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
                        struct sctp_stream_in *strm,
                        struct sctp_tcb *stcb, struct sctp_association *asoc,
                        struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the
	 * data from the chk onto the control and free
	 * up the chunk resources.
	 *
	 * Returns the number of bytes appended to the control's existing
	 * mbuf chain (0 when the chunk supplied the control's first data,
	 * since sctp_setup_tail_pointer() does not report a byte count).
	 * The chunk itself is always consumed (freed) on return.
	 */
	uint32_t added=0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * Its being pd-api'd so we must
		 * do some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		/* First data for this control: take the chunk's mbuf chain. */
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		/* Append to the existing chain; 'added' receives the byte count. */
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	/* Record the highest fragment sequence number now merged in. */
	control->fsn_included = chk->rec.data.fsn;
	/* The chunk's bytes leave the reassembly queue accounting. */
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	/* Ownership of the mbufs moved to control; don't free them with chk. */
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* First fragment defines the message's TSN/PPID for sinfo. */
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				/* Partial delivery finishes with this fragment. */
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement size_on_all_streams,
				 * since control is on the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				      control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
1377
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	/*
	 * Place 'chk' into the reassembly state of 'control' (the in-stream
	 * message it belongs to), validating fragment sequence numbers
	 * (FSNs) along the way.  Protocol violations (duplicate FIRST,
	 * duplicate FSN, fragments beyond a seen LAST, etc.) abort the
	 * association via sctp_abort_in_reasm() and set *abort_flag.
	 * 'created_control' is nonzero when the caller just allocated
	 * 'control' and it still has to be linked into the stream queue.
	 */
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if ((unordered == 0) || (asoc->idata_supported)) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/* Ok we created this control and now
			 * lets validate that its legal i.e. there
			 * is a B bit set, if not and we have
			 * up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		/*
		 * Old DATA, unordered: these interleave arbitrarily and get
		 * their own dedicated handling path.
		 */
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reasembly portion:
	 * o if its the first it goes to the control mbuf.
	 * o if its not first but the next in sequence it goes to the control,
	 *   and each succeeding one in order also goes.
	 * o if its not in order we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either
			 * sent us two data chunks with FIRST,
			 * or they sent two un-ordered chunks that
			 * were fragmented at the same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		/* First fragment seeds the control's data and metadata. */
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted=0;
		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"We have a new top_fsn: %u\n",
					chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"The last fsn is now in place fsn: %u\n",
					chk->rec.data.fsn);
				control->last_frag_seen = 1;
				/* The LAST fragment must carry the highest FSN seen. */
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is not at top_fsn: %u -- abort\n",
						chk->rec.data.fsn,
						control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate last fsn: %u (top: %u) -- abort\n",
					chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
						chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/* validate not beyond top FSN if we have seen last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
					chk->rec.data.fsn,
					control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the
		 * new chunk in the reassembly for this
		 * control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a not first fsn: %u needs to be inserted\n",
			chk->rec.data.fsn);
		/* Walk the FSN-sorted reasm list and insert in order. */
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
					/* Last not at the end? huh? */
					SCTPDBG(SCTP_DEBUG_XXX,
						"Last fragment not last in list: -- abort\n");
					sctp_abort_in_reasm(stcb, control,
					    chk, abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
					return;
				}
				/*
				 * This one in queue is bigger than the new one, insert
				 * the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Insert it before fsn: %u\n",
					at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/* Gak, He sent me a duplicate str seq number */
				/*
				 * foo bar, I guess I will just free this new guy,
				 * should we abort too? FIX ME MAYBE? Or it COULD be
				 * that the SSN's have wrapped. Maybe I should
				 * compare to TSN somehow... sigh for now just blow
				 * away the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate to fsn: %u -- abort\n",
					at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
				chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control
	 * structure that are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been
	 * seen there is no sense in looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
					control, at,
					at->rec.data.fsn,
					next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the size-on-all-streams
					 * if its not on the read q. The read q
					 * flag will cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					/* Message complete: end any partial-delivery state. */
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				/* Gap in FSNs; stop merging. */
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
1672
Michael Tuexene5001952016-04-17 19:25:27 +02001673static struct sctp_queued_to_read *
Michael Tuexen00657ac2016-12-07 21:53:26 +01001674sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
tuexendd729232011-11-01 23:04:43 +00001675{
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001676 struct sctp_queued_to_read *control;
1677
Michael Tuexene5001952016-04-17 19:25:27 +02001678 if (ordered) {
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001679 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001680 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
Michael Tuexene5001952016-04-17 19:25:27 +02001681 break;
tuexendd729232011-11-01 23:04:43 +00001682 }
Michael Tuexene5001952016-04-17 19:25:27 +02001683 }
1684 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001685 if (idata_supported) {
1686 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1687 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1688 break;
1689 }
tuexendd729232011-11-01 23:04:43 +00001690 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01001691 } else {
1692 control = TAILQ_FIRST(&strm->uno_inqueue);
tuexendd729232011-11-01 23:04:43 +00001693 }
1694 }
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001695 return (control);
tuexendd729232011-11-01 23:04:43 +00001696}
1697
tuexendd729232011-11-01 23:04:43 +00001698static int
1699sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
Michael Tuexene5001952016-04-17 19:25:27 +02001700 struct mbuf **m, int offset, int chk_length,
1701 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001702 int *break_flag, int last_chunk, uint8_t chk_type)
tuexendd729232011-11-01 23:04:43 +00001703{
Michael Tuexen022ef442018-05-21 17:04:54 +02001704 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
Michael Tuexenffed0922020-07-10 13:24:15 +02001705 struct sctp_stream_in *strm;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001706 uint32_t tsn, fsn, gap, mid;
tuexendd729232011-11-01 23:04:43 +00001707 struct mbuf *dmbuf;
tuexen9784e9a2011-12-18 13:04:23 +00001708 int the_len;
tuexendd729232011-11-01 23:04:43 +00001709 int need_reasm_check = 0;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001710 uint16_t sid;
t00fcxen08f9ff92014-03-16 13:38:54 +00001711 struct mbuf *op_err;
1712 char msg[SCTP_DIAG_INFO_LEN];
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001713 struct sctp_queued_to_read *control, *ncontrol;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001714 uint32_t ppid;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001715 uint8_t chk_flags;
tuexendd729232011-11-01 23:04:43 +00001716 struct sctp_stream_reset_list *liste;
Michael Tuexene5001952016-04-17 19:25:27 +02001717 int ordered;
1718 size_t clen;
1719 int created_control = 0;
tuexendd729232011-11-01 23:04:43 +00001720
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001721 if (chk_type == SCTP_IDATA) {
1722 struct sctp_idata_chunk *chunk, chunk_buf;
1723
1724 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1725 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1726 chk_flags = chunk->ch.chunk_flags;
Michael Tuexene5001952016-04-17 19:25:27 +02001727 clen = sizeof(struct sctp_idata_chunk);
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001728 tsn = ntohl(chunk->dp.tsn);
1729 sid = ntohs(chunk->dp.sid);
1730 mid = ntohl(chunk->dp.mid);
1731 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02001732 fsn = 0;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001733 ppid = chunk->dp.ppid_fsn.ppid;
1734 } else {
1735 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1736 ppid = 0xffffffff; /* Use as an invalid value. */
1737 }
Michael Tuexene5001952016-04-17 19:25:27 +02001738 } else {
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001739 struct sctp_data_chunk *chunk, chunk_buf;
1740
1741 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1742 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1743 chk_flags = chunk->ch.chunk_flags;
Michael Tuexene5001952016-04-17 19:25:27 +02001744 clen = sizeof(struct sctp_data_chunk);
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001745 tsn = ntohl(chunk->dp.tsn);
1746 sid = ntohs(chunk->dp.sid);
1747 mid = (uint32_t)(ntohs(chunk->dp.ssn));
Michael Tuexene5001952016-04-17 19:25:27 +02001748 fsn = tsn;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001749 ppid = chunk->dp.ppid;
Michael Tuexene5001952016-04-17 19:25:27 +02001750 }
Michael Tuexene5001952016-04-17 19:25:27 +02001751 if ((size_t)chk_length == clen) {
1752 /*
1753 * Need to send an abort since we had a
1754 * empty data chunk.
1755 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001756 op_err = sctp_generate_no_user_data_cause(tsn);
Michael Tuexen555c8e82020-07-23 03:43:26 +02001757 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
Michael Tuexen1ade45c2021-07-09 23:32:42 +02001758 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
Michael Tuexene5001952016-04-17 19:25:27 +02001759 *abort_flag = 1;
1760 return (0);
1761 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001762 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
tuexendd729232011-11-01 23:04:43 +00001763 asoc->send_sack = 1;
1764 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001765 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
tuexendd729232011-11-01 23:04:43 +00001766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1767 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1768 }
1769 if (stcb == NULL) {
1770 return (0);
1771 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001772 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
tuexendd729232011-11-01 23:04:43 +00001773 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1774 /* It is a duplicate */
1775 SCTP_STAT_INCR(sctps_recvdupdata);
1776 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1777 /* Record a dup for the next outbound sack */
1778 asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 asoc->numduptsns++;
1780 }
1781 asoc->send_sack = 1;
1782 return (0);
1783 }
1784 /* Calculate the number of TSN's between the base and this TSN */
1785 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1786 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1787 /* Can't hold the bit in the mapping at max array, toss it */
1788 return (0);
1789 }
1790 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1791 SCTP_TCB_LOCK_ASSERT(stcb);
1792 if (sctp_expand_mapping_array(asoc, gap)) {
1793 /* Can't expand, drop it */
1794 return (0);
1795 }
1796 }
1797 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1798 *high_tsn = tsn;
1799 }
1800 /* See if we have received this one already */
1801 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1802 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1803 SCTP_STAT_INCR(sctps_recvdupdata);
1804 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1805 /* Record a dup for the next outbound sack */
1806 asoc->dup_tsns[asoc->numduptsns] = tsn;
1807 asoc->numduptsns++;
1808 }
1809 asoc->send_sack = 1;
1810 return (0);
1811 }
1812 /*
1813 * Check to see about the GONE flag, duplicates would cause a sack
1814 * to be sent up above
1815 */
1816 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1817 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
t00fcxen08f9ff92014-03-16 13:38:54 +00001818 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
tuexendd729232011-11-01 23:04:43 +00001819 /*
1820 * wait a minute, this guy is gone, there is no longer a
1821 * receiver. Send peer an ABORT!
1822 */
t00fcxen08f9ff92014-03-16 13:38:54 +00001823 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
Michael Tuexen1ade45c2021-07-09 23:32:42 +02001824 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00001825 *abort_flag = 1;
1826 return (0);
1827 }
1828 /*
1829 * Now before going further we see if there is room. If NOT then we
1830 * MAY let one through only IF this TSN is the one we are waiting
1831 * for on a partial delivery API.
1832 */
1833
Michael Tuexene5001952016-04-17 19:25:27 +02001834 /* Is the stream valid? */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001835 if (sid >= asoc->streamincnt) {
Michael Tuexenf39c4292015-09-12 19:39:48 +02001836 struct sctp_error_invalid_stream *cause;
tuexendd729232011-11-01 23:04:43 +00001837
Michael Tuexenf39c4292015-09-12 19:39:48 +02001838 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1839 0, M_NOWAIT, 1, MT_DATA);
1840 if (op_err != NULL) {
tuexendd729232011-11-01 23:04:43 +00001841 /* add some space up front so prepend will work well */
Michael Tuexenf39c4292015-09-12 19:39:48 +02001842 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1843 cause = mtod(op_err, struct sctp_error_invalid_stream *);
tuexendd729232011-11-01 23:04:43 +00001844 /*
1845 * Error causes are just param's and this one has
1846 * two back to back phdr, one with the error type
1847 * and size, the other with the streamid and a rsvd
1848 */
Michael Tuexenf39c4292015-09-12 19:39:48 +02001849 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1850 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1851 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001852 cause->stream_id = htons(sid);
Michael Tuexenf39c4292015-09-12 19:39:48 +02001853 cause->reserved = htons(0);
1854 sctp_queue_op_err(stcb, op_err);
tuexendd729232011-11-01 23:04:43 +00001855 }
1856 SCTP_STAT_INCR(sctps_badsid);
1857 SCTP_TCB_LOCK_ASSERT(stcb);
1858 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1859 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1860 asoc->highest_tsn_inside_nr_map = tsn;
1861 }
1862 if (tsn == (asoc->cumulative_tsn + 1)) {
1863 /* Update cum-ack */
1864 asoc->cumulative_tsn = tsn;
1865 }
1866 return (0);
1867 }
1868 /*
Michael Tuexene5001952016-04-17 19:25:27 +02001869 * If its a fragmented message, lets see if we can
1870 * find the control on the reassembly queues.
tuexendd729232011-11-01 23:04:43 +00001871 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001872 if ((chk_type == SCTP_IDATA) &&
1873 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001874 (fsn == 0)) {
Michael Tuexend3331282020-02-03 23:14:00 +01001875 /*
1876 * The first *must* be fsn 0, and other
Michael Tuexene5001952016-04-17 19:25:27 +02001877 * (middle/end) pieces can *not* be fsn 0.
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001878 * XXX: This can happen in case of a wrap around.
1879 * Ignore is for now.
Michael Tuexene5001952016-04-17 19:25:27 +02001880 */
Michael Tuexenedd369d2020-05-19 09:42:15 +02001881 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
Michael Tuexene5001952016-04-17 19:25:27 +02001882 goto err_out;
1883 }
Michael Tuexene411f662016-12-17 23:36:21 +01001884 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001885 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001886 chk_flags, control);
1887 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02001888 /* See if we can find the re-assembly entity */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001889 if (control != NULL) {
Michael Tuexene5001952016-04-17 19:25:27 +02001890 /* We found something, does it belong? */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001891 if (ordered && (mid != control->mid)) {
Michael Tuexenedd369d2020-05-19 09:42:15 +02001892 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
Michael Tuexene5001952016-04-17 19:25:27 +02001893 err_out:
1894 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen555c8e82020-07-23 03:43:26 +02001895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
Michael Tuexen1ade45c2021-07-09 23:32:42 +02001896 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
Michael Tuexene5001952016-04-17 19:25:27 +02001897 *abort_flag = 1;
1898 return (0);
1899 }
1900 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1901 /* We can't have a switched order with an unordered chunk */
Michael Tuexenedd369d2020-05-19 09:42:15 +02001902 SCTP_SNPRINTF(msg, sizeof(msg),
1903 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1904 tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001905 goto err_out;
1906 }
1907 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1908 /* We can't have a switched unordered with a ordered chunk */
Michael Tuexenedd369d2020-05-19 09:42:15 +02001909 SCTP_SNPRINTF(msg, sizeof(msg),
Michael Tuexend98d2c42020-05-18 14:09:04 +02001910 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
Michael Tuexenedd369d2020-05-19 09:42:15 +02001911 tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001912 goto err_out;
1913 }
1914 }
1915 } else {
1916 /* Its a complete segment. Lets validate we
1917 * don't have a re-assembly going on with
1918 * the same Stream/Seq (for ordered) or in
1919 * the same Stream for unordered.
1920 */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001921 if (control != NULL) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001922 if (ordered || asoc->idata_supported) {
1923 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001924 chk_flags, mid);
Michael Tuexenedd369d2020-05-19 09:42:15 +02001925 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001926 goto err_out;
1927 } else {
1928 if ((tsn == control->fsn_included + 1) &&
1929 (control->end_added == 0)) {
Michael Tuexenedd369d2020-05-19 09:42:15 +02001930 SCTP_SNPRINTF(msg, sizeof(msg),
1931 "Illegal message sequence, missing end for MID: %8.8x",
1932 control->fsn_included);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001933 goto err_out;
1934 } else {
1935 control = NULL;
1936 }
1937 }
Michael Tuexene5001952016-04-17 19:25:27 +02001938 }
1939 }
1940 /* now do the tests */
1941 if (((asoc->cnt_on_all_streams +
1942 asoc->cnt_on_reasm_queue +
1943 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1944 (((int)asoc->my_rwnd) <= 0)) {
1945 /*
1946 * When we have NO room in the rwnd we check to make sure
1947 * the reader is doing its job...
1948 */
Michael Tuexen13a3b532022-05-15 22:51:57 +02001949 if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
Michael Tuexene5001952016-04-17 19:25:27 +02001950 /* some to read, wake-up */
Michael Tuexen5be0c252020-06-13 00:53:56 +02001951#if defined(__APPLE__) && !defined(__Userspace__)
Michael Tuexene5001952016-04-17 19:25:27 +02001952 struct socket *so;
1953
1954 so = SCTP_INP_SO(stcb->sctp_ep);
1955 atomic_add_int(&stcb->asoc.refcnt, 1);
1956 SCTP_TCB_UNLOCK(stcb);
1957 SCTP_SOCKET_LOCK(so, 1);
1958 SCTP_TCB_LOCK(stcb);
1959 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1960 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1961 /* assoc was freed while we were unlocked */
1962 SCTP_SOCKET_UNLOCK(so, 1);
1963 return (0);
1964 }
1965#endif
1966 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
Michael Tuexen5be0c252020-06-13 00:53:56 +02001967#if defined(__APPLE__) && !defined(__Userspace__)
Michael Tuexene5001952016-04-17 19:25:27 +02001968 SCTP_SOCKET_UNLOCK(so, 1);
1969#endif
1970 }
1971 /* now is it in the mapping array of what we have accepted? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001972 if (chk_type == SCTP_DATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02001973 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1974 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1975 /* Nope not in the valid range dump it */
1976 dump_packet:
1977 sctp_set_rwnd(stcb, asoc);
1978 if ((asoc->cnt_on_all_streams +
1979 asoc->cnt_on_reasm_queue +
1980 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1981 SCTP_STAT_INCR(sctps_datadropchklmt);
1982 } else {
1983 SCTP_STAT_INCR(sctps_datadroprwnd);
1984 }
1985 *break_flag = 1;
1986 return (0);
1987 }
1988 } else {
1989 if (control == NULL) {
1990 goto dump_packet;
1991 }
1992 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1993 goto dump_packet;
1994 }
1995 }
1996 }
tuexendd729232011-11-01 23:04:43 +00001997#ifdef SCTP_ASOCLOG_OF_TSNS
1998 SCTP_TCB_LOCK_ASSERT(stcb);
1999 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2000 asoc->tsn_in_at = 0;
2001 asoc->tsn_in_wrapped = 1;
2002 }
2003 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
Michael Tuexen00657ac2016-12-07 21:53:26 +01002004 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2005 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
tuexendd729232011-11-01 23:04:43 +00002006 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2007 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2008 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2009 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2010 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2011 asoc->tsn_in_at++;
2012#endif
Michael Tuexene5001952016-04-17 19:25:27 +02002013 /*
2014 * Before we continue lets validate that we are not being fooled by
2015 * an evil attacker. We can only have Nk chunks based on our TSN
2016 * spread allowed by the mapping array N * 8 bits, so there is no
2017 * way our stream sequence numbers could have wrapped. We of course
2018 * only validate the FIRST fragment so the bit must be set.
2019 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002020 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
tuexendd729232011-11-01 23:04:43 +00002021 (TAILQ_EMPTY(&asoc->resetHead)) &&
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002022 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01002023 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
tuexendd729232011-11-01 23:04:43 +00002024 /* The incoming sseq is behind where we last delivered? */
Michael Tuexeneccb4be2016-04-18 08:58:59 +02002025 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01002026 mid, asoc->strmin[sid].last_mid_delivered);
tuexendd729232011-11-01 23:04:43 +00002027
Michael Tuexen00657ac2016-12-07 21:53:26 +01002028 if (asoc->idata_supported) {
Michael Tuexenedd369d2020-05-19 09:42:15 +02002029 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2030 asoc->strmin[sid].last_mid_delivered,
2031 tsn,
2032 sid,
2033 mid);
Michael Tuexen00657ac2016-12-07 21:53:26 +01002034 } else {
Michael Tuexenedd369d2020-05-19 09:42:15 +02002035 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2036 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2037 tsn,
2038 sid,
2039 (uint16_t)mid);
Michael Tuexen00657ac2016-12-07 21:53:26 +01002040 }
t00fcxen08f9ff92014-03-16 13:38:54 +00002041 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen555c8e82020-07-23 03:43:26 +02002042 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
Michael Tuexen1ade45c2021-07-09 23:32:42 +02002043 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00002044 *abort_flag = 1;
2045 return (0);
2046 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002047 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002048 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2049 } else {
2050 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2051 }
tuexendd729232011-11-01 23:04:43 +00002052 if (last_chunk == 0) {
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002053 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002054 dmbuf = SCTP_M_COPYM(*m,
2055 (offset + sizeof(struct sctp_idata_chunk)),
2056 the_len, M_NOWAIT);
2057 } else {
2058 dmbuf = SCTP_M_COPYM(*m,
2059 (offset + sizeof(struct sctp_data_chunk)),
2060 the_len, M_NOWAIT);
2061 }
tuexendd729232011-11-01 23:04:43 +00002062#ifdef SCTP_MBUF_LOGGING
2063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
t00fcxen8285bce2015-01-10 21:09:55 +00002064 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
tuexendd729232011-11-01 23:04:43 +00002065 }
2066#endif
2067 } else {
2068 /* We can steal the last chunk */
2069 int l_len;
2070 dmbuf = *m;
2071 /* lop off the top part */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002072 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002073 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2074 } else {
2075 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2076 }
tuexendd729232011-11-01 23:04:43 +00002077 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2078 l_len = SCTP_BUF_LEN(dmbuf);
2079 } else {
2080 /* need to count up the size hopefully
2081 * does not hit this to often :-0
2082 */
2083 struct mbuf *lat;
tuexen63fc0bb2011-12-27 12:24:52 +00002084
tuexendd729232011-11-01 23:04:43 +00002085 l_len = 0;
tuexen63fc0bb2011-12-27 12:24:52 +00002086 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
tuexendd729232011-11-01 23:04:43 +00002087 l_len += SCTP_BUF_LEN(lat);
tuexendd729232011-11-01 23:04:43 +00002088 }
2089 }
2090 if (l_len > the_len) {
2091 /* Trim the end round bytes off too */
2092 m_adj(dmbuf, -(l_len - the_len));
2093 }
2094 }
2095 if (dmbuf == NULL) {
2096 SCTP_STAT_INCR(sctps_nomem);
2097 return (0);
2098 }
Michael Tuexene5001952016-04-17 19:25:27 +02002099 /*
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002100 * Now no matter what, we need a control, get one
Michael Tuexene5001952016-04-17 19:25:27 +02002101 * if we don't have one (we may have gotten it
2102 * above when we found the message was fragmented
2103 */
2104 if (control == NULL) {
2105 sctp_alloc_a_readq(stcb, control);
2106 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002107 ppid,
2108 sid,
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002109 chk_flags,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002110 NULL, fsn, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002111 if (control == NULL) {
2112 SCTP_STAT_INCR(sctps_nomem);
2113 return (0);
2114 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002115 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
Michael Tuexen3381e772017-07-19 17:14:51 +02002116 struct mbuf *mm;
2117
Michael Tuexene5001952016-04-17 19:25:27 +02002118 control->data = dmbuf;
Michael Tuexenc38740e2019-10-06 10:52:55 +02002119 control->tail_mbuf = NULL;
Michael Tuexen3381e772017-07-19 17:14:51 +02002120 for (mm = control->data; mm; mm = mm->m_next) {
2121 control->length += SCTP_BUF_LEN(mm);
Michael Tuexenc38740e2019-10-06 10:52:55 +02002122 if (SCTP_BUF_NEXT(mm) == NULL) {
2123 control->tail_mbuf = mm;
2124 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002125 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002126 control->end_added = 1;
2127 control->last_frag_seen = 1;
2128 control->first_frag_seen = 1;
2129 control->fsn_included = fsn;
2130 control->top_fsn = fsn;
Michael Tuexene5001952016-04-17 19:25:27 +02002131 }
2132 created_control = 1;
2133 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002134 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002135 chk_flags, ordered, mid, control);
2136 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
tuexendd729232011-11-01 23:04:43 +00002137 TAILQ_EMPTY(&asoc->resetHead) &&
2138 ((ordered == 0) ||
Michael Tuexen00657ac2016-12-07 21:53:26 +01002139 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2140 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
tuexendd729232011-11-01 23:04:43 +00002141 /* Candidate for express delivery */
2142 /*
2143 * Its not fragmented, No PD-API is up, Nothing in the
2144 * delivery queue, Its un-ordered OR ordered and the next to
2145 * deliver AND nothing else is stuck on the stream queue,
2146 * And there is room for it in the socket buffer. Lets just
2147 * stuff it up the buffer....
2148 */
tuexendd729232011-11-01 23:04:43 +00002149 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2150 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2151 asoc->highest_tsn_inside_nr_map = tsn;
2152 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002153 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2154 control, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002155
tuexendd729232011-11-01 23:04:43 +00002156 sctp_add_to_readq(stcb->sctp_ep, stcb,
2157 control, &stcb->sctp_socket->so_rcv,
2158 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2159
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002160 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
tuexendd729232011-11-01 23:04:43 +00002161 /* for ordered, bump what we delivered */
Michael Tuexene411f662016-12-17 23:36:21 +01002162 asoc->strmin[sid].last_mid_delivered++;
tuexendd729232011-11-01 23:04:43 +00002163 }
2164 SCTP_STAT_INCR(sctps_recvexpress);
2165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002166 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
tuexendd729232011-11-01 23:04:43 +00002167 SCTP_STR_LOG_FROM_EXPRS_DEL);
2168 }
2169 control = NULL;
tuexendd729232011-11-01 23:04:43 +00002170 goto finish_express_del;
2171 }
tuexen63fc0bb2011-12-27 12:24:52 +00002172
Michael Tuexene5001952016-04-17 19:25:27 +02002173 /* Now will we need a chunk too? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002174 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
tuexendd729232011-11-01 23:04:43 +00002175 sctp_alloc_a_chunk(stcb, chk);
2176 if (chk == NULL) {
2177 /* No memory so we drop the chunk */
2178 SCTP_STAT_INCR(sctps_nomem);
2179 if (last_chunk == 0) {
2180 /* we copied it, free the copy */
2181 sctp_m_freem(dmbuf);
2182 }
2183 return (0);
2184 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002185 chk->rec.data.tsn = tsn;
tuexendd729232011-11-01 23:04:43 +00002186 chk->no_fr_allowed = 0;
Michael Tuexen00657ac2016-12-07 21:53:26 +01002187 chk->rec.data.fsn = fsn;
2188 chk->rec.data.mid = mid;
2189 chk->rec.data.sid = sid;
2190 chk->rec.data.ppid = ppid;
tuexendd729232011-11-01 23:04:43 +00002191 chk->rec.data.context = stcb->asoc.context;
2192 chk->rec.data.doing_fast_retransmit = 0;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002193 chk->rec.data.rcv_flags = chk_flags;
tuexendd729232011-11-01 23:04:43 +00002194 chk->asoc = asoc;
2195 chk->send_size = the_len;
2196 chk->whoTo = net;
Michael Tuexen00657ac2016-12-07 21:53:26 +01002197 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
Michael Tuexene5001952016-04-17 19:25:27 +02002198 chk,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002199 control, mid);
tuexendd729232011-11-01 23:04:43 +00002200 atomic_add_int(&net->ref_count, 1);
2201 chk->data = dmbuf;
Michael Tuexen3121b802016-04-10 23:28:19 +02002202 }
Michael Tuexene5001952016-04-17 19:25:27 +02002203 /* Set the appropriate TSN mark */
2204 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2205 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2206 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2207 asoc->highest_tsn_inside_nr_map = tsn;
2208 }
2209 } else {
2210 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2211 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2212 asoc->highest_tsn_inside_map = tsn;
2213 }
2214 }
2215 /* Now is it complete (i.e. not fragmented)? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002216 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02002217 /*
2218 * Special check for when streams are resetting. We
2219 * could be more smart about this and check the
2220 * actual stream to see if it is not being reset..
2221 * that way we would not create a HOLB when amongst
2222 * streams being reset and those not being reset.
2223 *
2224 */
2225 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2226 SCTP_TSN_GT(tsn, liste->tsn)) {
Michael Tuexen3121b802016-04-10 23:28:19 +02002227 /*
Michael Tuexene5001952016-04-17 19:25:27 +02002228 * yep its past where we need to reset... go
2229 * ahead and queue it.
Michael Tuexen3121b802016-04-10 23:28:19 +02002230 */
Michael Tuexene5001952016-04-17 19:25:27 +02002231 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2232 /* first one on */
2233 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2234 } else {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002235 struct sctp_queued_to_read *lcontrol, *nlcontrol;
Michael Tuexene5001952016-04-17 19:25:27 +02002236 unsigned char inserted = 0;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002237 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2238 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
Michael Tuexene5001952016-04-17 19:25:27 +02002239 continue;
2240 } else {
2241 /* found it */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002242 TAILQ_INSERT_BEFORE(lcontrol, control, next);
Michael Tuexene5001952016-04-17 19:25:27 +02002243 inserted = 1;
2244 break;
2245 }
Michael Tuexen3121b802016-04-10 23:28:19 +02002246 }
Michael Tuexene5001952016-04-17 19:25:27 +02002247 if (inserted == 0) {
2248 /*
2249 * must be put at end, use
2250 * prevP (all setup from
2251 * loop) to setup nextP.
2252 */
2253 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2254 }
2255 }
2256 goto finish_express_del;
2257 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002258 if (chk_flags & SCTP_DATA_UNORDERED) {
Michael Tuexene5001952016-04-17 19:25:27 +02002259 /* queue directly into socket buffer */
Michael Tuexen00657ac2016-12-07 21:53:26 +01002260 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2261 control, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002262 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2263 sctp_add_to_readq(stcb->sctp_ep, stcb,
2264 control,
2265 &stcb->sctp_socket->so_rcv, 1,
2266 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2267
2268 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002269 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2270 mid);
Michael Tuexene411f662016-12-17 23:36:21 +01002271 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
Michael Tuexene5001952016-04-17 19:25:27 +02002272 if (*abort_flag) {
t00fcxen2ed8f3d2014-04-23 21:28:37 +00002273 if (last_chunk) {
2274 *m = NULL;
2275 }
tuexendd729232011-11-01 23:04:43 +00002276 return (0);
tuexendd729232011-11-01 23:04:43 +00002277 }
2278 }
Michael Tuexene5001952016-04-17 19:25:27 +02002279 goto finish_express_del;
2280 }
2281 /* If we reach here its a reassembly */
2282 need_reasm_check = 1;
2283 SCTPDBG(SCTP_DEBUG_XXX,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002284 "Queue data to stream for reasm control: %p MID: %u\n",
2285 control, mid);
Michael Tuexene411f662016-12-17 23:36:21 +01002286 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02002287 if (*abort_flag) {
2288 /*
2289 * the assoc is now gone and chk was put onto the
2290 * reasm queue, which has all been freed.
2291 */
2292 if (last_chunk) {
2293 *m = NULL;
tuexendd729232011-11-01 23:04:43 +00002294 }
Michael Tuexene5001952016-04-17 19:25:27 +02002295 return (0);
tuexendd729232011-11-01 23:04:43 +00002296 }
2297finish_express_del:
Michael Tuexene5001952016-04-17 19:25:27 +02002298 /* Here we tidy up things */
tuexen15f99d82012-04-19 16:08:38 +00002299 if (tsn == (asoc->cumulative_tsn + 1)) {
2300 /* Update cum-ack */
2301 asoc->cumulative_tsn = tsn;
tuexendd729232011-11-01 23:04:43 +00002302 }
2303 if (last_chunk) {
2304 *m = NULL;
2305 }
2306 if (ordered) {
2307 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2308 } else {
2309 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2310 }
2311 SCTP_STAT_INCR(sctps_recvdata);
2312 /* Set it present please */
2313 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002314 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
tuexendd729232011-11-01 23:04:43 +00002315 }
2316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2317 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2318 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2319 }
Michael Tuexene411f662016-12-17 23:36:21 +01002320 if (need_reasm_check) {
2321 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2322 need_reasm_check = 0;
2323 }
tuexendd729232011-11-01 23:04:43 +00002324 /* check the special flag for stream resets */
2325 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2326 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2327 /*
2328 * we have finished working through the backlogged TSN's now
2329 * time to reset streams. 1: call reset function. 2: free
2330 * pending_reply space 3: distribute any chunks in
2331 * pending_reply_queue.
2332 */
t00fcxen0f0d87f2012-09-07 13:42:20 +00002333 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
tuexendd729232011-11-01 23:04:43 +00002334 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
Michael Tüxen6b4d2922015-07-22 13:55:48 +02002335 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
tuexendd729232011-11-01 23:04:43 +00002336 SCTP_FREE(liste, SCTP_M_STRESET);
2337 /*sa_ignore FREED_MEMORY*/
2338 liste = TAILQ_FIRST(&asoc->resetHead);
2339 if (TAILQ_EMPTY(&asoc->resetHead)) {
2340 /* All can be removed */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002341 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2342 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
Michael Tuexenffed0922020-07-10 13:24:15 +02002343 strm = &asoc->strmin[control->sinfo_stream];
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002344 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
tuexendd729232011-11-01 23:04:43 +00002345 if (*abort_flag) {
tuexen63fc0bb2011-12-27 12:24:52 +00002346 return (0);
tuexendd729232011-11-01 23:04:43 +00002347 }
Michael Tuexene411f662016-12-17 23:36:21 +01002348 if (need_reasm_check) {
Michael Tuexenffed0922020-07-10 13:24:15 +02002349 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
Michael Tuexene411f662016-12-17 23:36:21 +01002350 need_reasm_check = 0;
2351 }
tuexendd729232011-11-01 23:04:43 +00002352 }
2353 } else {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002354 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2355 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
tuexendd729232011-11-01 23:04:43 +00002356 break;
2357 }
2358 /*
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002359 * if control->sinfo_tsn is <= liste->tsn we can
tuexendd729232011-11-01 23:04:43 +00002360 * process it which is the NOT of
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002361 * control->sinfo_tsn > liste->tsn
tuexendd729232011-11-01 23:04:43 +00002362 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002363 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
Michael Tuexenffed0922020-07-10 13:24:15 +02002364 strm = &asoc->strmin[control->sinfo_stream];
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002365 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
tuexendd729232011-11-01 23:04:43 +00002366 if (*abort_flag) {
tuexen63fc0bb2011-12-27 12:24:52 +00002367 return (0);
tuexendd729232011-11-01 23:04:43 +00002368 }
Michael Tuexene411f662016-12-17 23:36:21 +01002369 if (need_reasm_check) {
Michael Tuexenffed0922020-07-10 13:24:15 +02002370 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
Michael Tuexene411f662016-12-17 23:36:21 +01002371 need_reasm_check = 0;
2372 }
tuexendd729232011-11-01 23:04:43 +00002373 }
2374 }
tuexendd729232011-11-01 23:04:43 +00002375 }
2376 return (1);
2377}
2378
/*
 * sctp_map_lookup_tab[b] is the number of consecutive 1-bits in the byte
 * b, counted from the least significant bit (equivalently: the index of
 * the lowest clear bit, with sctp_map_lookup_tab[0xff] == 8).
 *
 * sctp_slide_mapping_arrays() uses it to count, one byte (8 TSNs) at a
 * time, how many TSNs starting at mapping_array_base_tsn have been
 * received in the OR of nr_mapping_array and mapping_array.
 */
static const int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};
2413
/*
 * Advance the cumulative TSN from the mapping arrays and, when enough
 * leading bytes are fully acked, either clear the arrays outright or
 * slide their contents down so mapping_array_base_tsn moves forward.
 * Mutates asoc->cumulative_tsn, the two mapping arrays, their base TSN,
 * and the two highest-TSN-inside-map trackers.  No return value.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think
	 * that all entries that make up the position
	 * of the cum-ack would be in the nr-mapping array
	 * only.. i.e. things up to the cum-ack are always
	 * deliverable. Thats true with one exception, when
	 * its a fragmented message we may not deliver the data
	 * until some threshold (or all of it) is in place. So
	 * we must OR the nr_mapping_array and mapping_array to
	 * get a true picture of the cum-ack.
	 */
	struct sctp_association *asoc;
	int at;			/* count of consecutive received TSNs from the base */
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	/* Snapshot the pre-slide state for SCTP_MAP_PREPARE_SLIDE logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	at = 0;
	/*
	 * Scan the OR of both maps byte-by-byte; stop at the first byte
	 * with a hole.  slide_from is left pointing at that byte, and
	 * sctp_map_lookup_tab gives the trailing-ones count within it.
	 */
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	/* base + (at - 1) is the last TSN with no gap before it. */
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
		/* Inconsistent state: cum-ack beyond both highest-TSN trackers. */
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		/* Non-INVARIANTS build: log, then force the trackers consistent. */
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
		            asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;
#ifdef INVARIANTS
		unsigned int i;
#endif

		/* clear the array */
		/* clr = number of whole bytes that covered 'at' bits, capped to the map size */
		clr = ((at+7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		/* Everything past 'clr' must already be zero, or the maps are corrupt. */
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		/* Restart the window right after the new cum-ack. */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);	/* byte index holding highest_tsn */
		if (slide_end < slide_from) {
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
			            lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
			            asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		/* Bytes [slide_from .. slide_end] move to the front of the arrays. */
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
			             SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
			             (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
				             (uint32_t) asoc->mapping_array_size,
				             SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Copy the live tail down to offset 0, then zero the vacated tail. */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
			}
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * The highest-TSN trackers use base_tsn - 1 as the "map
			 * empty" sentinel; keep that relationship intact as the
			 * base advances by slide_from bytes (slide_from * 8 TSNs).
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
				             asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
				             SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
2580
/*
 * Decide whether to send a SACK now or (re)arm the delayed-ACK timer.
 *
 * Called after data processing. First slides the mapping arrays forward,
 * then determines whether a gap currently exists (highest TSN seen, in
 * either the renegable or non-renegable map, above the cumulative TSN).
 *
 * stcb      - the association's control block (TCB lock held by caller).
 * was_a_gap - non-zero if a gap existed *before* the just-processed data.
 */
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;
	int is_a_gap;

	sctp_slide_mapping_arrays(stcb);
	asoc = &stcb->asoc;
	/* The overall highest TSN is the max over both mapping arrays. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	/* Is there a gap now? */
	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok special case, in SHUTDOWN-SENT case. Here we make
		 * sure the SACK timer is off and instead send a SHUTDOWN
		 * and, if a gap exists, a SACK as well.
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
		}
		/* Prefer the alternate net if one is set, else the primary. */
		sctp_send_shutdown(stcb,
		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		if (is_a_gap) {
			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		}
	} else {
		/*
		 * CMT DAC algorithm: increase number of packets
		 * received since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a SACK */
		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
							 * longer is one */
		    (stcb->asoc.numduptsns) ||	/* we have dup's */
		    (is_a_gap) ||	/* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) {	/* hit limit of pkts */
			/*
			 * A SACK is warranted; check whether DAC lets us
			 * delay it anyway (CMT on, DAC enabled, nothing
			 * forcing an immediate SACK, timer not running).
			 */
			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of reordering.
				 * Therefore, acks that do not have to be
				 * sent because of the above reasons will
				 * be delayed. That is, acks that would
				 * have been sent due to gap reports will
				 * be delayed with DAC. Start the delayed
				 * ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok we must build a SACK since the
				 * timer is pending, we got our
				 * first packet OR there are gaps or
				 * duplicates.
				 */
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Nothing forces a SACK; arm the delayed-ACK timer. */
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}
2670
/*
 * Walk the DATA-chunk region of a received packet and process each chunk.
 *
 * mm       - in/out pointer to the packet mbuf chain; may be replaced here
 *            (small-packet copy to a smaller mbuf) so callers must re-read it.
 * iphlen   - IP header length (used for packet-dropped reports).
 * offset   - in/out byte offset of the first chunk; advanced past each chunk.
 * length   - total packet length.
 * inp/stcb/net - endpoint, association (TCB lock held), and source net.
 * high_tsn - out: highest TSN seen while processing.
 *
 * Returns 0 on normal completion, 1 if the first chunk header could not be
 * read, 2 if the association was aborted during processing.
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t *high_tsn)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int stop_proc = 0;
	int break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;
	uint16_t chk_length;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	/* Record whether a gap existed before this packet's data. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * setup where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net. This is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it. If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf. This will help
	 * with cluster starvation.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		if (m) {
			/* ok lets see if we can copy the data up */
			caddr_t *from, *to;

			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world .. yikes */
			m = *mm;
		}
	}
	/* get pointer to the first chunk header */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
	    sizeof(struct sctp_chunkhdr),
	    (uint8_t *)&chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		/*
		 * A DATA chunk when I-DATA was negotiated (or vice versa)
		 * is a protocol violation: abort the association.
		 */
		if ((asoc->idata_supported == 1) &&
		    (ch->chunk_type == SCTP_DATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((asoc->idata_supported == 0) &&
		    (ch->chunk_type == SCTP_IDATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((ch->chunk_type == SCTP_DATA) ||
		    (ch->chunk_type == SCTP_IDATA)) {
			uint16_t clen;

			/* Minimum length depends on the chunk flavor. */
			if (ch->chunk_type == SCTP_DATA) {
				clen = sizeof(struct sctp_data_chunk);
			} else {
				clen = sizeof(struct sctp_idata_chunk);
			}
			if (chk_length < clen) {
				/*
				 * Need to send an abort since we had a
				 * invalid data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
				    chk_length);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk, ch->chunk_type)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				continue;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				{
					/*
					 * Now, what do we do with KNOWN
					 * chunks that are NOT in the right
					 * place?
					 *
					 * For now, I do nothing but ignore
					 * them. We may later want to add
					 * sysctl stuff to switch out and do
					 * either an ABORT() or possibly
					 * process them.
					 */
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
					    ch->chunk_type);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
					return (2);
				}
			default:
				/*
				 * Unknown chunk type: use bit rules after
				 * checking length
				 */
				if (chk_length < sizeof(struct sctp_chunkhdr)) {
					/*
					 * Need to send an abort since we
					 * had a invalid chunk.
					 */
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
					return (2);
				}
				/* Upper bit set: report unrecognized chunk. */
				if (ch->chunk_type & 0x40) {
					/* Add a error report to the queue */
					struct mbuf *op_err;
					struct sctp_gen_error_cause *cause;

					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
					    0, M_NOWAIT, 1, MT_DATA);
					if (op_err != NULL) {
						cause = mtod(op_err, struct sctp_gen_error_cause *);
						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
						if (SCTP_BUF_NEXT(op_err) != NULL) {
							sctp_queue_op_err(stcb, op_err);
						} else {
							sctp_m_freem(op_err);
						}
					}
				}
				if ((ch->chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			}	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr),
		    (uint8_t *)&chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			continue;
		}
	}
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * Did we get data, if so update the time for auto-close and
		 * give peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INDATA,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassm queue if needed */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
	return (0);
}
2960
/*
 * Process one SACK gap-ack block [frag_strt, frag_end] (offsets relative to
 * last_tsn) against the sent queue, marking covered chunks ACKED/NR-ACKED.
 *
 * p_tp1 - in/out cursor into the sent queue so successive blocks resume the
 *         scan where the previous one stopped; the scan may wrap once per
 *         TSN ("circled") to handle out-of-order blocks.
 * nr_sacking - non-zero for non-renegable (NR) gap blocks: chunk data is
 *         freed immediately and stream accounting is updated.
 * num_frs, biggest_newly_acked_tsn, this_sack_lowest_newack - accumulators
 *         shared across the blocks of one SACK.
 * rto_ok - in/out: cleared after the first successful RTT measurement so
 *         only one measurement per SACK is taken.
 *
 * Returns non-zero if any chunk's data was freed (caller may wake senders);
 * the return value is only used for nr-sack processing.
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t *biggest_newly_acked_tsn,
    uint32_t *this_sack_lowest_newack,
    int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.tsn == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.tsn;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.tsn,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.tsn,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uint32_t)(uintptr_t)tp1->whoTo,
							    tp1->rec.data.tsn);
						}
						/* Chunk leaves flight: update accounting. */
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							    tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?
							 */
							if (tp1->do_rtt) {
								if (*rto_ok &&
								    sctp_calculate_rto(stcb,
								    &stcb->asoc,
								    tp1->whoTo,
								    &tp1->sent_rcv_time,
								    SCTP_RTT_FROM_DATA)) {
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}
					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
							    tp1->rec.data.tsn;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
						}
						/* Last chunk of a pending stream reset: trigger it. */
						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
							stcb->asoc.trigger_reset = 1;
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/* sa_ignore NO_NULL_CHK */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			}	/* if (tp1->tsn == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
				/* Past the target TSN: it is not in the queue. */
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			if ((tp1 == NULL) && (circled == 0)) {
				/* Wrap once in case blocks arrived out of order. */
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		}	/* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	}	/* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}
3195
tuexendd729232011-11-01 23:04:43 +00003196static int
3197sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3198 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3199 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
tuexen9784e9a2011-12-18 13:04:23 +00003200 int num_seg, int num_nr_seg, int *rto_ok)
tuexendd729232011-11-01 23:04:43 +00003201{
3202 struct sctp_gap_ack_block *frag, block;
3203 struct sctp_tmit_chunk *tp1;
3204 int i;
3205 int num_frs = 0;
3206 int chunk_freed;
3207 int non_revocable;
3208 uint16_t frag_strt, frag_end, prev_frag_end;
3209
3210 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3211 prev_frag_end = 0;
3212 chunk_freed = 0;
3213
3214 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3215 if (i == num_seg) {
3216 prev_frag_end = 0;
3217 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3218 }
3219 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3220 sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3221 *offset += sizeof(block);
3222 if (frag == NULL) {
3223 return (chunk_freed);
3224 }
3225 frag_strt = ntohs(frag->start);
3226 frag_end = ntohs(frag->end);
3227
3228 if (frag_strt > frag_end) {
3229 /* This gap report is malformed, skip it. */
3230 continue;
3231 }
3232 if (frag_strt <= prev_frag_end) {
3233 /* This gap report is not in order, so restart. */
3234 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3235 }
3236 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3237 *biggest_tsn_acked = last_tsn + frag_end;
3238 }
3239 if (i < num_seg) {
3240 non_revocable = 0;
3241 } else {
3242 non_revocable = 1;
3243 }
3244 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3245 non_revocable, &num_frs, biggest_newly_acked_tsn,
tuexen9784e9a2011-12-18 13:04:23 +00003246 this_sack_lowest_newack, rto_ok)) {
tuexendd729232011-11-01 23:04:43 +00003247 chunk_freed = 1;
3248 }
3249 prev_frag_end = frag_end;
3250 }
3251 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3252 if (num_frs)
3253 sctp_log_fr(*biggest_tsn_acked,
3254 *biggest_newly_acked_tsn,
3255 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3256 }
3257 return (chunk_freed);
3258}
3259
3260static void
3261sctp_check_for_revoked(struct sctp_tcb *stcb,
3262 struct sctp_association *asoc, uint32_t cumack,
3263 uint32_t biggest_tsn_acked)
3264{
3265 struct sctp_tmit_chunk *tp1;
tuexendd729232011-11-01 23:04:43 +00003266
3267 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003268 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
tuexendd729232011-11-01 23:04:43 +00003269 /*
3270 * ok this guy is either ACK or MARKED. If it is
3271 * ACKED it has been previously acked but not this
3272 * time i.e. revoked. If it is MARKED it was ACK'ed
3273 * again.
3274 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01003275 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
tuexendd729232011-11-01 23:04:43 +00003276 break;
3277 }
3278 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3279 /* it has been revoked */
3280 tp1->sent = SCTP_DATAGRAM_SENT;
3281 tp1->rec.data.chunk_was_revoked = 1;
3282 /* We must add this stuff back in to
3283 * assure timers and such get started.
3284 */
3285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3286 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3287 tp1->whoTo->flight_size,
3288 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003289 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003290 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003291 }
3292 sctp_flight_size_increase(tp1);
3293 sctp_total_flight_increase(stcb, tp1);
3294 /* We inflate the cwnd to compensate for our
3295 * artificial inflation of the flight_size.
3296 */
3297 tp1->whoTo->cwnd += tp1->book_size;
tuexendd729232011-11-01 23:04:43 +00003298 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3299 sctp_log_sack(asoc->last_acked_seq,
3300 cumack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003301 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003302 0,
3303 0,
3304 SCTP_LOG_TSN_REVOKED);
3305 }
3306 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3307 /* it has been re-acked in this SACK */
3308 tp1->sent = SCTP_DATAGRAM_ACKED;
3309 }
3310 }
3311 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3312 break;
3313 }
3314}
3315
tuexendd729232011-11-01 23:04:43 +00003316static void
3317sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3318 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3319{
3320 struct sctp_tmit_chunk *tp1;
3321 int strike_flag = 0;
3322 struct timeval now;
tuexendd729232011-11-01 23:04:43 +00003323 uint32_t sending_seq;
3324 struct sctp_nets *net;
3325 int num_dests_sacked = 0;
3326
3327 /*
3328 * select the sending_seq, this is either the next thing ready to be
3329 * sent but not transmitted, OR, the next seq we assign.
3330 */
3331 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3332 if (tp1 == NULL) {
3333 sending_seq = asoc->sending_seq;
3334 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003335 sending_seq = tp1->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00003336 }
3337
3338 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3339 if ((asoc->sctp_cmt_on_off > 0) &&
3340 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3341 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3342 if (net->saw_newack)
3343 num_dests_sacked++;
3344 }
3345 }
t00fcxen0e78cef2014-08-02 22:05:33 +00003346 if (stcb->asoc.prsctp_supported) {
tuexendd729232011-11-01 23:04:43 +00003347 (void)SCTP_GETTIME_TIMEVAL(&now);
3348 }
3349 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3350 strike_flag = 0;
3351 if (tp1->no_fr_allowed) {
3352 /* this one had a timeout or something */
3353 continue;
3354 }
3355 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3356 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3357 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003358 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003359 tp1->sent,
3360 SCTP_FR_LOG_CHECK_STRIKE);
3361 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01003362 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
tuexendd729232011-11-01 23:04:43 +00003363 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3364 /* done */
3365 break;
3366 }
t00fcxen0e78cef2014-08-02 22:05:33 +00003367 if (stcb->asoc.prsctp_supported) {
tuexendd729232011-11-01 23:04:43 +00003368 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3369 /* Is it expired? */
Michael Tuexen5be0c252020-06-13 00:53:56 +02003370#if !(defined(__FreeBSD__) && !defined(__Userspace__))
tuexendd729232011-11-01 23:04:43 +00003371 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3372#else
3373 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3374#endif
3375 /* Yes so drop it */
3376 if (tp1->data != NULL) {
tuexenda53ff02012-05-14 09:00:59 +00003377 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
tuexendd729232011-11-01 23:04:43 +00003378 SCTP_SO_NOT_LOCKED);
3379 }
3380 continue;
3381 }
3382 }
tuexendd729232011-11-01 23:04:43 +00003383 }
Michael Tuexen83714a82018-01-16 23:02:09 +01003384 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3385 !(accum_moved && asoc->fast_retran_loss_recovery)) {
tuexendd729232011-11-01 23:04:43 +00003386 /* we are beyond the tsn in the sack */
3387 break;
3388 }
3389 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3390 /* either a RESEND, ACKED, or MARKED */
3391 /* skip */
3392 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3393 /* Continue strikin FWD-TSN chunks */
3394 tp1->rec.data.fwd_tsn_cnt++;
3395 }
3396 continue;
3397 }
3398 /*
3399 * CMT : SFR algo (covers part of DAC and HTNA as well)
3400 */
3401 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3402 /*
David Sandersb519f512022-03-29 13:55:16 -07003403 * No new acks were received for data sent to this
tuexendd729232011-11-01 23:04:43 +00003404 * dest. Therefore, according to the SFR algo for
3405 * CMT, no data sent to this dest can be marked for
3406 * FR using this SACK.
3407 */
3408 continue;
Michael Tuexen83714a82018-01-16 23:02:09 +01003409 } else if (tp1->whoTo &&
3410 SCTP_TSN_GT(tp1->rec.data.tsn,
3411 tp1->whoTo->this_sack_highest_newack) &&
3412 !(accum_moved && asoc->fast_retran_loss_recovery)) {
tuexendd729232011-11-01 23:04:43 +00003413 /*
David Sandersb519f512022-03-29 13:55:16 -07003414 * CMT: New acks were received for data sent to
tuexendd729232011-11-01 23:04:43 +00003415 * this dest. But no new acks were seen for data
3416 * sent after tp1. Therefore, according to the SFR
3417 * algo for CMT, tp1 cannot be marked for FR using
3418 * this SACK. This step covers part of the DAC algo
3419 * and the HTNA algo as well.
3420 */
3421 continue;
3422 }
3423 /*
3424 * Here we check to see if we were have already done a FR
3425 * and if so we see if the biggest TSN we saw in the sack is
3426 * smaller than the recovery point. If so we don't strike
3427 * the tsn... otherwise we CAN strike the TSN.
3428 */
3429 /*
3430 * @@@ JRI: Check for CMT
3431 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3432 */
3433 if (accum_moved && asoc->fast_retran_loss_recovery) {
3434 /*
3435 * Strike the TSN if in fast-recovery and cum-ack
3436 * moved.
3437 */
3438 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3439 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003440 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003441 tp1->sent,
3442 SCTP_FR_LOG_STRIKE_CHUNK);
3443 }
3444 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3445 tp1->sent++;
3446 }
3447 if ((asoc->sctp_cmt_on_off > 0) &&
3448 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3449 /*
3450 * CMT DAC algorithm: If SACK flag is set to
3451 * 0, then lowest_newack test will not pass
3452 * because it would have been set to the
3453 * cumack earlier. If not already to be
3454 * rtx'd, If not a mixed sack and if tp1 is
3455 * not between two sacked TSNs, then mark by
3456 * one more.
3457 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3458 * two packets have been received after this missing TSN.
3459 */
3460 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01003461 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3463 sctp_log_fr(16 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003464 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003465 tp1->sent,
3466 SCTP_FR_LOG_STRIKE_CHUNK);
3467 }
3468 tp1->sent++;
3469 }
3470 }
3471 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3472 (asoc->sctp_cmt_on_off == 0)) {
3473 /*
3474 * For those that have done a FR we must take
3475 * special consideration if we strike. I.e the
3476 * biggest_newly_acked must be higher than the
3477 * sending_seq at the time we did the FR.
3478 */
3479 if (
3480#ifdef SCTP_FR_TO_ALTERNATE
3481 /*
3482 * If FR's go to new networks, then we must only do
3483 * this for singly homed asoc's. However if the FR's
3484 * go to the same network (Armando's work) then its
3485 * ok to FR multiple times.
3486 */
3487 (asoc->numnets < 2)
3488#else
3489 (1)
3490#endif
3491 ) {
tuexendd729232011-11-01 23:04:43 +00003492 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3493 tp1->rec.data.fast_retran_tsn)) {
3494 /*
3495 * Strike the TSN, since this ack is
3496 * beyond where things were when we
3497 * did a FR.
3498 */
3499 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3500 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003501 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003502 tp1->sent,
3503 SCTP_FR_LOG_STRIKE_CHUNK);
3504 }
3505 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3506 tp1->sent++;
3507 }
3508 strike_flag = 1;
3509 if ((asoc->sctp_cmt_on_off > 0) &&
3510 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3511 /*
3512 * CMT DAC algorithm: If
3513 * SACK flag is set to 0,
3514 * then lowest_newack test
3515 * will not pass because it
3516 * would have been set to
3517 * the cumack earlier. If
3518 * not already to be rtx'd,
3519 * If not a mixed sack and
3520 * if tp1 is not between two
3521 * sacked TSNs, then mark by
3522 * one more.
3523 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3524 * two packets have been received after this missing TSN.
3525 */
3526 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3527 (num_dests_sacked == 1) &&
3528 SCTP_TSN_GT(this_sack_lowest_newack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003529 tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003530 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3531 sctp_log_fr(32 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003532 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003533 tp1->sent,
3534 SCTP_FR_LOG_STRIKE_CHUNK);
3535 }
3536 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3537 tp1->sent++;
3538 }
3539 }
3540 }
3541 }
3542 }
3543 /*
3544 * JRI: TODO: remove code for HTNA algo. CMT's
3545 * SFR algo covers HTNA.
3546 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01003547 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003548 biggest_tsn_newly_acked)) {
3549 /*
3550 * We don't strike these: This is the HTNA
3551 * algorithm i.e. we don't strike If our TSN is
3552 * larger than the Highest TSN Newly Acked.
3553 */
3554 ;
3555 } else {
3556 /* Strike the TSN */
3557 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3558 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003559 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003560 tp1->sent,
3561 SCTP_FR_LOG_STRIKE_CHUNK);
3562 }
3563 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3564 tp1->sent++;
3565 }
3566 if ((asoc->sctp_cmt_on_off > 0) &&
3567 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3568 /*
3569 * CMT DAC algorithm: If SACK flag is set to
3570 * 0, then lowest_newack test will not pass
3571 * because it would have been set to the
3572 * cumack earlier. If not already to be
3573 * rtx'd, If not a mixed sack and if tp1 is
3574 * not between two sacked TSNs, then mark by
3575 * one more.
3576 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3577 * two packets have been received after this missing TSN.
3578 */
3579 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01003580 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3582 sctp_log_fr(48 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003583 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003584 tp1->sent,
3585 SCTP_FR_LOG_STRIKE_CHUNK);
3586 }
3587 tp1->sent++;
3588 }
3589 }
3590 }
3591 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3592 struct sctp_nets *alt;
3593
3594 /* fix counts and things */
3595 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3596 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3597 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3598 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003599 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003600 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003601 }
3602 if (tp1->whoTo) {
3603 tp1->whoTo->net_ack++;
3604 sctp_flight_size_decrease(tp1);
3605 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3606 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3607 tp1);
3608 }
3609 }
tuexen15f99d82012-04-19 16:08:38 +00003610
tuexendd729232011-11-01 23:04:43 +00003611 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3612 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3613 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3614 }
3615 /* add back to the rwnd */
3616 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
tuexen15f99d82012-04-19 16:08:38 +00003617
tuexendd729232011-11-01 23:04:43 +00003618 /* remove from the total flight */
3619 sctp_total_flight_decrease(stcb, tp1);
3620
t00fcxen0e78cef2014-08-02 22:05:33 +00003621 if ((stcb->asoc.prsctp_supported) &&
tuexendd729232011-11-01 23:04:43 +00003622 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3623 /* Has it been retransmitted tv_sec times? - we store the retran count there. */
3624 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3625 /* Yes, so drop it */
3626 if (tp1->data != NULL) {
tuexenda53ff02012-05-14 09:00:59 +00003627 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
tuexendd729232011-11-01 23:04:43 +00003628 SCTP_SO_NOT_LOCKED);
3629 }
3630 /* Make sure to flag we had a FR */
Michael Tuexen4d933602018-05-06 16:23:44 +02003631 if (tp1->whoTo != NULL) {
3632 tp1->whoTo->net_ack++;
3633 }
tuexendd729232011-11-01 23:04:43 +00003634 continue;
3635 }
tuexen15f99d82012-04-19 16:08:38 +00003636 }
tuexencb5fe8d2012-05-04 09:50:27 +00003637 /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
tuexendd729232011-11-01 23:04:43 +00003638 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003639 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
tuexendd729232011-11-01 23:04:43 +00003640 0, SCTP_FR_MARKED);
3641 }
3642 if (strike_flag) {
3643 /* This is a subsequent FR */
3644 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3645 }
3646 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3647 if (asoc->sctp_cmt_on_off > 0) {
3648 /*
3649 * CMT: Using RTX_SSTHRESH policy for CMT.
3650 * If CMT is being used, then pick dest with
3651 * largest ssthresh for any retransmission.
3652 */
3653 tp1->no_fr_allowed = 1;
3654 alt = tp1->whoTo;
3655 /*sa_ignore NO_NULL_CHK*/
3656 if (asoc->sctp_cmt_pf > 0) {
3657 /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3658 alt = sctp_find_alternate_net(stcb, alt, 2);
3659 } else {
3660 /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3661 /*sa_ignore NO_NULL_CHK*/
3662 alt = sctp_find_alternate_net(stcb, alt, 1);
3663 }
3664 if (alt == NULL) {
3665 alt = tp1->whoTo;
3666 }
3667 /*
3668 * CUCv2: If a different dest is picked for
3669 * the retransmission, then new
3670 * (rtx-)pseudo_cumack needs to be tracked
3671 * for orig dest. Let CUCv2 track new (rtx-)
3672 * pseudo-cumack always.
3673 */
3674 if (tp1->whoTo) {
3675 tp1->whoTo->find_pseudo_cumack = 1;
3676 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3677 }
tuexendd729232011-11-01 23:04:43 +00003678 } else {/* CMT is OFF */
tuexendd729232011-11-01 23:04:43 +00003679#ifdef SCTP_FR_TO_ALTERNATE
3680 /* Can we find an alternate? */
3681 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3682#else
3683 /*
3684 * default behavior is to NOT retransmit
3685 * FR's to an alternate. Armando Caro's
3686 * paper details why.
3687 */
3688 alt = tp1->whoTo;
3689#endif
3690 }
3691
3692 tp1->rec.data.doing_fast_retransmit = 1;
tuexendd729232011-11-01 23:04:43 +00003693 /* mark the sending seq for possible subsequent FR's */
3694 /*
tuexencb5fe8d2012-05-04 09:50:27 +00003695 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01003696 * (uint32_t)tpi->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003697 */
3698 if (TAILQ_EMPTY(&asoc->send_queue)) {
3699 /*
3700 * If the queue of send is empty then its
3701 * the next sequence number that will be
3702 * assigned so we subtract one from this to
3703 * get the one we last sent.
3704 */
3705 tp1->rec.data.fast_retran_tsn = sending_seq;
3706 } else {
3707 /*
3708 * If there are chunks on the send queue
3709 * (unsent data that has made it from the
3710 * stream queues but not out the door, we
3711 * take the first one (which will have the
3712 * lowest TSN) and subtract one to get the
3713 * one we last sent.
3714 */
3715 struct sctp_tmit_chunk *ttt;
3716
3717 ttt = TAILQ_FIRST(&asoc->send_queue);
3718 tp1->rec.data.fast_retran_tsn =
Michael Tuexen00657ac2016-12-07 21:53:26 +01003719 ttt->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00003720 }
3721
3722 if (tp1->do_rtt) {
3723 /*
3724 * this guy had a RTO calculation pending on
3725 * it, cancel it
3726 */
tuexen63fc0bb2011-12-27 12:24:52 +00003727 if ((tp1->whoTo != NULL) &&
3728 (tp1->whoTo->rto_needed == 0)) {
tuexendd729232011-11-01 23:04:43 +00003729 tp1->whoTo->rto_needed = 1;
3730 }
3731 tp1->do_rtt = 0;
3732 }
3733 if (alt != tp1->whoTo) {
3734 /* yes, there is an alternate. */
3735 sctp_free_remote_addr(tp1->whoTo);
3736 /*sa_ignore FREED_MEMORY*/
3737 tp1->whoTo = alt;
3738 atomic_add_int(&alt->ref_count, 1);
3739 }
3740 }
3741 }
3742}
3743
/*
 * PR-SCTP: walk the head of the sent queue and advance
 * asoc->advanced_peer_ack_point over chunks marked to be skipped
 * (SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED), dropping expired
 * TTL-policy resends along the way.  Stops at the first chunk that is
 * reliable or still pending.  Returns the last chunk the ack point was
 * advanced to (presumably used by the caller to build a FORWARD-TSN —
 * confirm at call sites), or NULL if PR-SCTP is not supported or no
 * advance was possible.
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;	/* lazily fill 'now' only if a TTL check is needed */

	if (asoc->prsctp_supported == 0) {
		return (NULL);
	}
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    asoc->advanced_peer_ack_point,
				    tp1->rec.data.tsn, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
				/* Yes so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when hit one for resend
				 * whos time as not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* advance PeerAckPoint goes forward */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
				a_adv = tp1;
			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
	}
	return (a_adv);
}
3836
3837static int
3838sctp_fs_audit(struct sctp_association *asoc)
3839{
3840 struct sctp_tmit_chunk *chk;
tuexen63fc0bb2011-12-27 12:24:52 +00003841 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
t00fcxen006c3bc2015-05-28 14:33:28 +00003842 int ret;
3843#ifndef INVARIANTS
3844 int entry_flight, entry_cnt;
3845#endif
t00fcxen2ea88ad2014-02-20 20:24:25 +00003846
t00fcxen006c3bc2015-05-28 14:33:28 +00003847 ret = 0;
3848#ifndef INVARIANTS
tuexendd729232011-11-01 23:04:43 +00003849 entry_flight = asoc->total_flight;
3850 entry_cnt = asoc->total_flight_count;
t00fcxen006c3bc2015-05-28 14:33:28 +00003851#endif
tuexendd729232011-11-01 23:04:43 +00003852 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3853 return (0);
3854
3855 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3856 if (chk->sent < SCTP_DATAGRAM_RESEND) {
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003857 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01003858 chk->rec.data.tsn,
tuexencb5fe8d2012-05-04 09:50:27 +00003859 chk->send_size,
3860 chk->snd_count);
tuexendd729232011-11-01 23:04:43 +00003861 inflight++;
3862 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3863 resend++;
3864 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3865 inbetween++;
3866 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3867 above++;
3868 } else {
3869 acked++;
3870 }
3871 }
3872
3873 if ((inflight > 0) || (inbetween > 0)) {
3874#ifdef INVARIANTS
Michael Tuexene19d1222022-08-03 17:35:14 +02003875 panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
3876 inflight, inbetween, resend, above, acked);
tuexendd729232011-11-01 23:04:43 +00003877#else
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003878 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
tuexencb5fe8d2012-05-04 09:50:27 +00003879 entry_flight, entry_cnt);
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003880 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
Michael Tuexene19d1222022-08-03 17:35:14 +02003881 inflight, inbetween, resend, above, acked);
tuexendd729232011-11-01 23:04:43 +00003882 ret = 1;
3883#endif
3884 }
3885 return (ret);
3886}
3887
tuexendd729232011-11-01 23:04:43 +00003888static void
3889sctp_window_probe_recovery(struct sctp_tcb *stcb,
Michael Tuexenf7f9cd82020-09-24 15:14:13 +02003890 struct sctp_association *asoc,
3891 struct sctp_tmit_chunk *tp1)
tuexendd729232011-11-01 23:04:43 +00003892{
3893 tp1->window_probe = 0;
3894 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3895 /* TSN's skipped we do NOT move back. */
3896 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
t00fcxenf95cdf42015-03-24 15:12:04 +00003897 tp1->whoTo ? tp1->whoTo->flight_size : 0,
tuexendd729232011-11-01 23:04:43 +00003898 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003899 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003900 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003901 return;
3902 }
3903 /* First setup this by shrinking flight */
3904 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3905 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3906 tp1);
3907 }
3908 sctp_flight_size_decrease(tp1);
3909 sctp_total_flight_decrease(stcb, tp1);
3910 /* Now mark for resend */
3911 tp1->sent = SCTP_DATAGRAM_RESEND;
3912 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
tuexen15f99d82012-04-19 16:08:38 +00003913
tuexendd729232011-11-01 23:04:43 +00003914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3915 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3916 tp1->whoTo->flight_size,
3917 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003918 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003919 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003920 }
3921}
3922
3923void
3924sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3925 uint32_t rwnd, int *abort_now, int ecne_seen)
3926{
3927 struct sctp_nets *net;
3928 struct sctp_association *asoc;
3929 struct sctp_tmit_chunk *tp1, *tp2;
3930 uint32_t old_rwnd;
3931 int win_probe_recovery = 0;
3932 int win_probe_recovered = 0;
3933 int j, done_once = 0;
tuexen63fc0bb2011-12-27 12:24:52 +00003934 int rto_ok = 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02003935 uint32_t send_s;
tuexendd729232011-11-01 23:04:43 +00003936
3937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3938 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3939 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3940 }
3941 SCTP_TCB_LOCK_ASSERT(stcb);
3942#ifdef SCTP_ASOCLOG_OF_TSNS
3943 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3944 stcb->asoc.cumack_log_at++;
3945 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3946 stcb->asoc.cumack_log_at = 0;
3947 }
3948#endif
3949 asoc = &stcb->asoc;
3950 old_rwnd = asoc->peers_rwnd;
3951 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3952 /* old ack */
3953 return;
3954 } else if (asoc->last_acked_seq == cumack) {
3955 /* Window update sack */
3956 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3957 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3958 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3959 /* SWS sender side engages */
3960 asoc->peers_rwnd = 0;
3961 }
3962 if (asoc->peers_rwnd > old_rwnd) {
3963 goto again;
3964 }
3965 return;
3966 }
3967
3968 /* First setup for CC stuff */
3969 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3970 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3971 /* Drag along the window_tsn for cwr's */
3972 net->cwr_window_tsn = cumack;
3973 }
3974 net->prev_cwnd = net->cwnd;
3975 net->net_ack = 0;
3976 net->net_ack2 = 0;
3977
3978 /*
3979 * CMT: Reset CUC and Fast recovery algo variables before
3980 * SACK processing
3981 */
3982 net->new_pseudo_cumack = 0;
3983 net->will_exit_fast_recovery = 0;
3984 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3985 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3986 }
3987 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02003988 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3989 tp1 = TAILQ_LAST(&asoc->sent_queue,
3990 sctpchunk_listhead);
Michael Tuexen00657ac2016-12-07 21:53:26 +01003991 send_s = tp1->rec.data.tsn + 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02003992 } else {
3993 send_s = asoc->sending_seq;
3994 }
3995 if (SCTP_TSN_GE(cumack, send_s)) {
3996 struct mbuf *op_err;
3997 char msg[SCTP_DIAG_INFO_LEN];
tuexendd729232011-11-01 23:04:43 +00003998
Michael Tuexen0ec21502016-05-12 18:39:01 +02003999 *abort_now = 1;
4000 /* XXX */
Michael Tuexenedd369d2020-05-19 09:42:15 +02004001 SCTP_SNPRINTF(msg, sizeof(msg),
4002 "Cum ack %8.8x greater or equal than TSN %8.8x",
4003 cumack, send_s);
Michael Tuexen0ec21502016-05-12 18:39:01 +02004004 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen555c8e82020-07-23 03:43:26 +02004005 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
Michael Tuexen1ade45c2021-07-09 23:32:42 +02004006 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
Michael Tuexen0ec21502016-05-12 18:39:01 +02004007 return;
tuexendd729232011-11-01 23:04:43 +00004008 }
4009 asoc->this_sack_highest_gap = cumack;
4010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4011 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4012 stcb->asoc.overall_error_count,
4013 0,
4014 SCTP_FROM_SCTP_INDATA,
4015 __LINE__);
4016 }
4017 stcb->asoc.overall_error_count = 0;
4018 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4019 /* process the new consecutive TSN first */
4020 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004021 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00004022 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
tuexencb5fe8d2012-05-04 09:50:27 +00004023 SCTP_PRINTF("Warning, an unsent is now acked?\n");
tuexendd729232011-11-01 23:04:43 +00004024 }
4025 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4026 /*
4027 * If it is less than ACKED, it is
4028 * now no-longer in flight. Higher
4029 * values may occur during marking
4030 */
4031 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4032 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4033 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4034 tp1->whoTo->flight_size,
4035 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004036 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004037 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004038 }
4039 sctp_flight_size_decrease(tp1);
4040 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4041 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4042 tp1);
4043 }
4044 /* sa_ignore NO_NULL_CHK */
4045 sctp_total_flight_decrease(stcb, tp1);
4046 }
4047 tp1->whoTo->net_ack += tp1->send_size;
4048 if (tp1->snd_count < 2) {
4049 /*
Michael Tuexenc51af972018-08-12 15:32:55 +02004050 * True non-retransmitted
tuexendd729232011-11-01 23:04:43 +00004051 * chunk
4052 */
4053 tp1->whoTo->net_ack2 +=
4054 tp1->send_size;
4055
4056 /* update RTO too? */
4057 if (tp1->do_rtt) {
Michael Tuexenb7ed78b2019-09-22 12:48:36 +02004058 if (rto_ok &&
4059 sctp_calculate_rto(stcb,
4060 &stcb->asoc,
4061 tp1->whoTo,
4062 &tp1->sent_rcv_time,
4063 SCTP_RTT_FROM_DATA)) {
tuexendd729232011-11-01 23:04:43 +00004064 rto_ok = 0;
4065 }
4066 if (tp1->whoTo->rto_needed == 0) {
4067 tp1->whoTo->rto_needed = 1;
4068 }
4069 tp1->do_rtt = 0;
4070 }
4071 }
4072 /*
4073 * CMT: CUCv2 algorithm. From the
4074 * cumack'd TSNs, for each TSN being
4075 * acked for the first time, set the
4076 * following variables for the
4077 * corresp destination.
4078 * new_pseudo_cumack will trigger a
4079 * cwnd update.
4080 * find_(rtx_)pseudo_cumack will
4081 * trigger search for the next
4082 * expected (rtx-)pseudo-cumack.
4083 */
4084 tp1->whoTo->new_pseudo_cumack = 1;
4085 tp1->whoTo->find_pseudo_cumack = 1;
4086 tp1->whoTo->find_rtx_pseudo_cumack = 1;
tuexendd729232011-11-01 23:04:43 +00004087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4088 /* sa_ignore NO_NULL_CHK */
Michael Tuexen00657ac2016-12-07 21:53:26 +01004089 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004090 }
4091 }
4092 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4093 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4094 }
4095 if (tp1->rec.data.chunk_was_revoked) {
4096 /* deflate the cwnd */
4097 tp1->whoTo->cwnd -= tp1->book_size;
4098 tp1->rec.data.chunk_was_revoked = 0;
4099 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004100 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004101 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4102 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
t00fcxen9ad90772012-11-07 22:19:57 +00004103#ifdef INVARIANTS
4104 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004105 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
t00fcxen9ad90772012-11-07 22:19:57 +00004106#endif
4107 }
4108 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01004109 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4110 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4111 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
Michael Tuexenc0a12d12015-12-03 16:30:24 +01004112 asoc->trigger_reset = 1;
4113 }
tuexendd729232011-11-01 23:04:43 +00004114 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4115 if (tp1->data) {
4116 /* sa_ignore NO_NULL_CHK */
4117 sctp_free_bufspace(stcb, asoc, tp1, 1);
4118 sctp_m_freem(tp1->data);
4119 tp1->data = NULL;
4120 }
4121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4122 sctp_log_sack(asoc->last_acked_seq,
4123 cumack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004124 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004125 0,
4126 0,
4127 SCTP_LOG_FREE_SENT);
4128 }
4129 asoc->sent_queue_cnt--;
4130 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4131 } else {
4132 break;
4133 }
4134 }
tuexendd729232011-11-01 23:04:43 +00004135 }
tuexen6bffa9a2012-06-25 17:40:03 +00004136#if defined(__Userspace__)
tuexen98456cf2012-04-19 15:37:07 +00004137 if (stcb->sctp_ep->recv_callback) {
4138 if (stcb->sctp_socket) {
4139 uint32_t inqueue_bytes, sb_free_now;
4140 struct sctp_inpcb *inp;
tuexen749d8562011-11-13 13:41:49 +00004141
tuexen98456cf2012-04-19 15:37:07 +00004142 inp = stcb->sctp_ep;
tuexen749d8562011-11-13 13:41:49 +00004143 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
tuexen98456cf2012-04-19 15:37:07 +00004144 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4145
4146 /* check if the amount free in the send socket buffer crossed the threshold */
4147 if (inp->send_callback &&
4148 (((inp->send_sb_threshold > 0) &&
4149 (sb_free_now >= inp->send_sb_threshold) &&
4150 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4151 (inp->send_sb_threshold == 0))) {
4152 atomic_add_int(&stcb->asoc.refcnt, 1);
4153 SCTP_TCB_UNLOCK(stcb);
José Luis Millán3df8f522020-08-01 12:34:40 +02004154 inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
tuexen98456cf2012-04-19 15:37:07 +00004155 SCTP_TCB_LOCK(stcb);
4156 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4157 }
tuexen749d8562011-11-13 13:41:49 +00004158 }
tuexen98456cf2012-04-19 15:37:07 +00004159 } else if (stcb->sctp_socket) {
tuexen749d8562011-11-13 13:41:49 +00004160#else
tuexendd729232011-11-01 23:04:43 +00004161 /* sa_ignore NO_NULL_CHK */
4162 if (stcb->sctp_socket) {
tuexen98456cf2012-04-19 15:37:07 +00004163#endif
Michael Tuexen5be0c252020-06-13 00:53:56 +02004164#if defined(__APPLE__) && !defined(__Userspace__)
tuexendd729232011-11-01 23:04:43 +00004165 struct socket *so;
4166
4167#endif
4168 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4169 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4170 /* sa_ignore NO_NULL_CHK */
tuexen9784e9a2011-12-18 13:04:23 +00004171 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004172 }
Michael Tuexen5be0c252020-06-13 00:53:56 +02004173#if defined(__APPLE__) && !defined(__Userspace__)
tuexendd729232011-11-01 23:04:43 +00004174 so = SCTP_INP_SO(stcb->sctp_ep);
4175 atomic_add_int(&stcb->asoc.refcnt, 1);
4176 SCTP_TCB_UNLOCK(stcb);
4177 SCTP_SOCKET_LOCK(so, 1);
4178 SCTP_TCB_LOCK(stcb);
4179 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4180 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4181 /* assoc was freed while we were unlocked */
4182 SCTP_SOCKET_UNLOCK(so, 1);
4183 return;
4184 }
4185#endif
4186 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
Michael Tuexen5be0c252020-06-13 00:53:56 +02004187#if defined(__APPLE__) && !defined(__Userspace__)
tuexendd729232011-11-01 23:04:43 +00004188 SCTP_SOCKET_UNLOCK(so, 1);
4189#endif
4190 } else {
4191 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004192 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004193 }
4194 }
4195
4196 /* JRS - Use the congestion control given in the CC module */
4197 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4198 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4199 if (net->net_ack2 > 0) {
4200 /*
4201 * Karn's rule applies to clearing error count, this
4202 * is optional.
4203 */
4204 net->error_count = 0;
Michael Tuexen8d6935d2022-05-15 01:51:24 +02004205 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
tuexendd729232011-11-01 23:04:43 +00004206 /* addr came good */
4207 net->dest_state |= SCTP_ADDR_REACHABLE;
4208 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
tuexenda53ff02012-05-14 09:00:59 +00004209 0, (void *)net, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00004210 }
4211 if (net == stcb->asoc.primary_destination) {
4212 if (stcb->asoc.alternate) {
4213 /* release the alternate, primary is good */
4214 sctp_free_remote_addr(stcb->asoc.alternate);
4215 stcb->asoc.alternate = NULL;
4216 }
4217 }
4218 if (net->dest_state & SCTP_ADDR_PF) {
4219 net->dest_state &= ~SCTP_ADDR_PF;
t00fcxen0057a6d2015-05-28 16:42:49 +00004220 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4221 stcb->sctp_ep, stcb, net,
Michael Tuexen555c8e82020-07-23 03:43:26 +02004222 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
tuexendd729232011-11-01 23:04:43 +00004223 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4224 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4225 /* Done with this net */
4226 net->net_ack = 0;
4227 }
4228 /* restore any doubled timers */
4229 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4230 if (net->RTO < stcb->asoc.minrto) {
4231 net->RTO = stcb->asoc.minrto;
4232 }
4233 if (net->RTO > stcb->asoc.maxrto) {
4234 net->RTO = stcb->asoc.maxrto;
4235 }
4236 }
4237 }
4238 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4239 }
4240 asoc->last_acked_seq = cumack;
4241
4242 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4243 /* nothing left in-flight */
4244 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4245 net->flight_size = 0;
4246 net->partial_bytes_acked = 0;
4247 }
4248 asoc->total_flight = 0;
4249 asoc->total_flight_count = 0;
4250 }
4251
4252 /* RWND update */
4253 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4254 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4255 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4256 /* SWS sender side engages */
4257 asoc->peers_rwnd = 0;
4258 }
4259 if (asoc->peers_rwnd > old_rwnd) {
4260 win_probe_recovery = 1;
4261 }
4262 /* Now assure a timer where data is queued at */
4263again:
4264 j = 0;
4265 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
tuexendd729232011-11-01 23:04:43 +00004266 if (win_probe_recovery && (net->window_probe)) {
4267 win_probe_recovered = 1;
4268 /*
4269 * Find first chunk that was used with window probe
4270 * and clear the sent
4271 */
4272 /* sa_ignore FREED_MEMORY */
4273 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4274 if (tp1->window_probe) {
4275 /* move back to data send queue */
tuexen9784e9a2011-12-18 13:04:23 +00004276 sctp_window_probe_recovery(stcb, asoc, tp1);
tuexendd729232011-11-01 23:04:43 +00004277 break;
4278 }
4279 }
4280 }
tuexendd729232011-11-01 23:04:43 +00004281 if (net->flight_size) {
4282 j++;
Michael Tuexena7360a12017-09-17 11:30:34 +02004283 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
tuexendd729232011-11-01 23:04:43 +00004284 if (net->window_probe) {
4285 net->window_probe = 0;
4286 }
4287 } else {
4288 if (net->window_probe) {
4289 /* In window probes we must assure a timer is still running there */
4290 net->window_probe = 0;
4291 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
Michael Tuexena7360a12017-09-17 11:30:34 +02004292 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
tuexendd729232011-11-01 23:04:43 +00004293 }
4294 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4295 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4296 stcb, net,
Michael Tuexen555c8e82020-07-23 03:43:26 +02004297 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
tuexendd729232011-11-01 23:04:43 +00004298 }
4299 }
4300 }
4301 if ((j == 0) &&
4302 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4303 (asoc->sent_queue_retran_cnt == 0) &&
4304 (win_probe_recovered == 0) &&
4305 (done_once == 0)) {
4306 /* huh, this should not happen unless all packets
4307 * are PR-SCTP and marked to skip of course.
4308 */
4309 if (sctp_fs_audit(asoc)) {
4310 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4311 net->flight_size = 0;
4312 }
4313 asoc->total_flight = 0;
4314 asoc->total_flight_count = 0;
4315 asoc->sent_queue_retran_cnt = 0;
4316 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4317 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4318 sctp_flight_size_increase(tp1);
4319 sctp_total_flight_increase(stcb, tp1);
4320 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4321 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4322 }
4323 }
4324 }
4325 done_once = 1;
4326 goto again;
4327 }
4328 /**********************************/
4329 /* Now what about shutdown issues */
4330 /**********************************/
4331 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4332 /* nothing left on sendqueue.. consider done */
4333 /* clean up */
4334 if ((asoc->stream_queue_cnt == 1) &&
4335 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02004336 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexenfdcf7902016-08-06 14:39:31 +02004337 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
Michael Tuexen348a36c2018-08-13 16:24:47 +02004338 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
tuexendd729232011-11-01 23:04:43 +00004339 }
Michael Tuexen74842cb2017-07-20 13:15:46 +02004340 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02004341 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexen74842cb2017-07-20 13:15:46 +02004342 (asoc->stream_queue_cnt == 1) &&
4343 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4344 struct mbuf *op_err;
4345
4346 *abort_now = 1;
4347 /* XXX */
4348 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
Michael Tuexen555c8e82020-07-23 03:43:26 +02004349 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
Michael Tuexen1ade45c2021-07-09 23:32:42 +02004350 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
Michael Tuexen74842cb2017-07-20 13:15:46 +02004351 return;
4352 }
tuexendd729232011-11-01 23:04:43 +00004353 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4354 (asoc->stream_queue_cnt == 0)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02004355 struct sctp_nets *netp;
tuexendd729232011-11-01 23:04:43 +00004356
Michael Tuexen348a36c2018-08-13 16:24:47 +02004357 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4358 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02004359 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
tuexendd729232011-11-01 23:04:43 +00004360 }
Michael Tuexen348a36c2018-08-13 16:24:47 +02004361 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
Michael Tuexen74842cb2017-07-20 13:15:46 +02004362 sctp_stop_timers_for_shutdown(stcb);
4363 if (asoc->alternate) {
4364 netp = asoc->alternate;
4365 } else {
4366 netp = asoc->primary_destination;
4367 }
4368 sctp_send_shutdown(stcb, netp);
4369 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4370 stcb->sctp_ep, stcb, netp);
4371 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
Michael Tuexend07a5f22020-03-19 23:34:46 +01004372 stcb->sctp_ep, stcb, NULL);
Michael Tuexen348a36c2018-08-13 16:24:47 +02004373 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
tuexendd729232011-11-01 23:04:43 +00004374 (asoc->stream_queue_cnt == 0)) {
4375 struct sctp_nets *netp;
t00fcxend0ad16b2013-02-09 18:34:24 +00004376
tuexendd729232011-11-01 23:04:43 +00004377 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
Michael Tuexen348a36c2018-08-13 16:24:47 +02004378 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
tuexendd729232011-11-01 23:04:43 +00004379 sctp_stop_timers_for_shutdown(stcb);
t00fcxend0ad16b2013-02-09 18:34:24 +00004380 if (asoc->alternate) {
4381 netp = asoc->alternate;
4382 } else {
4383 netp = asoc->primary_destination;
4384 }
4385 sctp_send_shutdown_ack(stcb, netp);
tuexendd729232011-11-01 23:04:43 +00004386 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4387 stcb->sctp_ep, stcb, netp);
4388 }
4389 }
4390 /*********************************************/
4391 /* Here we perform PR-SCTP procedures */
4392 /* (section 4.2) */
4393 /*********************************************/
4394 /* C1. update advancedPeerAckPoint */
4395 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4396 asoc->advanced_peer_ack_point = cumack;
4397 }
4398 /* PR-Sctp issues need to be addressed too */
t00fcxen0e78cef2014-08-02 22:05:33 +00004399 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
tuexendd729232011-11-01 23:04:43 +00004400 struct sctp_tmit_chunk *lchk;
4401 uint32_t old_adv_peer_ack_point;
tuexen15f99d82012-04-19 16:08:38 +00004402
tuexendd729232011-11-01 23:04:43 +00004403 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4404 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4405 /* C3. See if we need to send a Fwd-TSN */
4406 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4407 /*
4408 * ISSUE with ECN, see FWD-TSN processing.
4409 */
4410 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4411 send_forward_tsn(stcb, asoc);
4412 } else if (lchk) {
4413 /* try to FR fwd-tsn's that get lost too */
4414 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4415 send_forward_tsn(stcb, asoc);
4416 }
4417 }
4418 }
Michael Tuexena8f3d9d2020-05-10 19:35:08 +02004419 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4420 if (lchk->whoTo != NULL) {
4421 break;
4422 }
4423 }
4424 if (lchk != NULL) {
tuexendd729232011-11-01 23:04:43 +00004425 /* Assure a timer is up */
4426 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
Michael Tuexena8f3d9d2020-05-10 19:35:08 +02004427 stcb->sctp_ep, stcb, lchk->whoTo);
tuexendd729232011-11-01 23:04:43 +00004428 }
4429 }
4430 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4431 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4432 rwnd,
4433 stcb->asoc.peers_rwnd,
4434 stcb->asoc.total_flight,
4435 stcb->asoc.total_output_queue_size);
4436 }
4437}
4438
4439void
4440sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
tuexen9784e9a2011-12-18 13:04:23 +00004441 struct sctp_tcb *stcb,
tuexendd729232011-11-01 23:04:43 +00004442 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4443 int *abort_now, uint8_t flags,
4444 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4445{
4446 struct sctp_association *asoc;
4447 struct sctp_tmit_chunk *tp1, *tp2;
4448 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
tuexendd729232011-11-01 23:04:43 +00004449 uint16_t wake_him = 0;
4450 uint32_t send_s = 0;
4451 long j;
4452 int accum_moved = 0;
4453 int will_exit_fast_recovery = 0;
4454 uint32_t a_rwnd, old_rwnd;
4455 int win_probe_recovery = 0;
4456 int win_probe_recovered = 0;
4457 struct sctp_nets *net = NULL;
tuexendd729232011-11-01 23:04:43 +00004458 int done_once;
tuexen63fc0bb2011-12-27 12:24:52 +00004459 int rto_ok = 1;
tuexendd729232011-11-01 23:04:43 +00004460 uint8_t reneged_all = 0;
4461 uint8_t cmt_dac_flag;
4462 /*
4463 * we take any chance we can to service our queues since we cannot
4464 * get awoken when the socket is read from :<
4465 */
4466 /*
4467 * Now perform the actual SACK handling: 1) Verify that it is not an
4468 * old sack, if so discard. 2) If there is nothing left in the send
4469 * queue (cum-ack is equal to last acked) then you have a duplicate
4470 * too, update any rwnd change and verify no timers are running.
David Sandersb519f512022-03-29 13:55:16 -07004471 * then return. 3) Process any new consecutive data i.e. cum-ack
tuexendd729232011-11-01 23:04:43 +00004472 * moved process these first and note that it moved. 4) Process any
4473 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4474 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4475 * sync up flightsizes and things, stop all timers and also check
4476 * for shutdown_pending state. If so then go ahead and send off the
4477 * shutdown. If in shutdown recv, send off the shutdown-ack and
4478 * start that timer, Ret. 9) Strike any non-acked things and do FR
4479 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4480 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4481 * if in shutdown_recv state.
4482 */
4483 SCTP_TCB_LOCK_ASSERT(stcb);
4484 /* CMT DAC algo */
4485 this_sack_lowest_newack = 0;
tuexendd729232011-11-01 23:04:43 +00004486 SCTP_STAT_INCR(sctps_slowpath_sack);
4487 last_tsn = cum_ack;
4488 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4489#ifdef SCTP_ASOCLOG_OF_TSNS
4490 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4491 stcb->asoc.cumack_log_at++;
4492 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4493 stcb->asoc.cumack_log_at = 0;
4494 }
4495#endif
4496 a_rwnd = rwnd;
4497
4498 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4499 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4500 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4501 }
4502
4503 old_rwnd = stcb->asoc.peers_rwnd;
4504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4505 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4506 stcb->asoc.overall_error_count,
4507 0,
4508 SCTP_FROM_SCTP_INDATA,
4509 __LINE__);
4510 }
4511 stcb->asoc.overall_error_count = 0;
4512 asoc = &stcb->asoc;
4513 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4514 sctp_log_sack(asoc->last_acked_seq,
4515 cum_ack,
4516 0,
4517 num_seg,
4518 num_dup,
4519 SCTP_LOG_NEW_SACK);
4520 }
4521 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4522 uint16_t i;
4523 uint32_t *dupdata, dblock;
4524
4525 for (i = 0; i < num_dup; i++) {
4526 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4527 sizeof(uint32_t), (uint8_t *)&dblock);
4528 if (dupdata == NULL) {
4529 break;
4530 }
4531 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4532 }
4533 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004534 /* reality check */
4535 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4536 tp1 = TAILQ_LAST(&asoc->sent_queue,
4537 sctpchunk_listhead);
Michael Tuexen00657ac2016-12-07 21:53:26 +01004538 send_s = tp1->rec.data.tsn + 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02004539 } else {
4540 tp1 = NULL;
4541 send_s = asoc->sending_seq;
4542 }
4543 if (SCTP_TSN_GE(cum_ack, send_s)) {
4544 struct mbuf *op_err;
4545 char msg[SCTP_DIAG_INFO_LEN];
t00fcxen08f9ff92014-03-16 13:38:54 +00004546
Michael Tuexen0ec21502016-05-12 18:39:01 +02004547 /*
4548 * no way, we have not even sent this TSN out yet.
4549 * Peer is hopelessly messed up with us.
4550 */
4551 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4552 cum_ack, send_s);
4553 if (tp1) {
4554 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01004555 tp1->rec.data.tsn, (void *)tp1);
tuexendd729232011-11-01 23:04:43 +00004556 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004557 hopeless_peer:
4558 *abort_now = 1;
4559 /* XXX */
Michael Tuexenedd369d2020-05-19 09:42:15 +02004560 SCTP_SNPRINTF(msg, sizeof(msg),
4561 "Cum ack %8.8x greater or equal than TSN %8.8x",
4562 cum_ack, send_s);
Michael Tuexen0ec21502016-05-12 18:39:01 +02004563 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexen555c8e82020-07-23 03:43:26 +02004564 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
Michael Tuexen1ade45c2021-07-09 23:32:42 +02004565 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
Michael Tuexen0ec21502016-05-12 18:39:01 +02004566 return;
tuexendd729232011-11-01 23:04:43 +00004567 }
4568 /**********************/
4569 /* 1) check the range */
4570 /**********************/
4571 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4572 /* acking something behind */
4573 return;
4574 }
tuexendd729232011-11-01 23:04:43 +00004575
4576 /* update the Rwnd of the peer */
4577 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4578 TAILQ_EMPTY(&asoc->send_queue) &&
4579 (asoc->stream_queue_cnt == 0)) {
4580 /* nothing left on send/sent and strmq */
4581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4582 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4583 asoc->peers_rwnd, 0, 0, a_rwnd);
4584 }
4585 asoc->peers_rwnd = a_rwnd;
4586 if (asoc->sent_queue_retran_cnt) {
4587 asoc->sent_queue_retran_cnt = 0;
4588 }
4589 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4590 /* SWS sender side engages */
4591 asoc->peers_rwnd = 0;
4592 }
4593 /* stop any timers */
4594 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4595 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
Michael Tuexen555c8e82020-07-23 03:43:26 +02004596 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
tuexendd729232011-11-01 23:04:43 +00004597 net->partial_bytes_acked = 0;
4598 net->flight_size = 0;
4599 }
4600 asoc->total_flight = 0;
4601 asoc->total_flight_count = 0;
4602 return;
4603 }
4604 /*
4605 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4606 * things. The total byte count acked is tracked in netAckSz AND
4607 * netAck2 is used to track the total bytes acked that are un-
David Sandersb519f512022-03-29 13:55:16 -07004608 * ambiguous and were never retransmitted. We track these on a per
tuexendd729232011-11-01 23:04:43 +00004609 * destination address basis.
4610 */
4611 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4613 /* Drag along the window_tsn for cwr's */
4614 net->cwr_window_tsn = cum_ack;
4615 }
4616 net->prev_cwnd = net->cwnd;
4617 net->net_ack = 0;
4618 net->net_ack2 = 0;
4619
4620 /*
4621 * CMT: Reset CUC and Fast recovery algo variables before
4622 * SACK processing
4623 */
4624 net->new_pseudo_cumack = 0;
4625 net->will_exit_fast_recovery = 0;
4626 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4627 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4628 }
Michael Tuexen83714a82018-01-16 23:02:09 +01004629
4630 /*
4631 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4632 * to be greater than the cumack. Also reset saw_newack to 0
4633 * for all dests.
4634 */
4635 net->saw_newack = 0;
4636 net->this_sack_highest_newack = last_tsn;
tuexendd729232011-11-01 23:04:43 +00004637 }
4638 /* process the new consecutive TSN first */
4639 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004640 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00004641 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4642 accum_moved = 1;
4643 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4644 /*
4645 * If it is less than ACKED, it is
4646 * now no-longer in flight. Higher
4647 * values may occur during marking
4648 */
4649 if ((tp1->whoTo->dest_state &
4650 SCTP_ADDR_UNCONFIRMED) &&
4651 (tp1->snd_count < 2)) {
4652 /*
4653 * If there was no retran
4654 * and the address is
4655 * un-confirmed and we sent
4656 * there and are now
4657 * sacked.. its confirmed,
4658 * mark it so.
4659 */
4660 tp1->whoTo->dest_state &=
4661 ~SCTP_ADDR_UNCONFIRMED;
4662 }
4663 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4664 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4665 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4666 tp1->whoTo->flight_size,
4667 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004668 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004669 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004670 }
4671 sctp_flight_size_decrease(tp1);
4672 sctp_total_flight_decrease(stcb, tp1);
4673 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4674 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4675 tp1);
4676 }
4677 }
4678 tp1->whoTo->net_ack += tp1->send_size;
4679
4680 /* CMT SFR and DAC algos */
Michael Tuexen00657ac2016-12-07 21:53:26 +01004681 this_sack_lowest_newack = tp1->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00004682 tp1->whoTo->saw_newack = 1;
4683
4684 if (tp1->snd_count < 2) {
4685 /*
Michael Tuexenc51af972018-08-12 15:32:55 +02004686 * True non-retransmitted
tuexendd729232011-11-01 23:04:43 +00004687 * chunk
4688 */
4689 tp1->whoTo->net_ack2 +=
4690 tp1->send_size;
4691
4692 /* update RTO too? */
4693 if (tp1->do_rtt) {
Michael Tuexenb7ed78b2019-09-22 12:48:36 +02004694 if (rto_ok &&
4695 sctp_calculate_rto(stcb,
4696 &stcb->asoc,
4697 tp1->whoTo,
4698 &tp1->sent_rcv_time,
4699 SCTP_RTT_FROM_DATA)) {
tuexendd729232011-11-01 23:04:43 +00004700 rto_ok = 0;
4701 }
4702 if (tp1->whoTo->rto_needed == 0) {
4703 tp1->whoTo->rto_needed = 1;
4704 }
4705 tp1->do_rtt = 0;
4706 }
4707 }
4708 /*
4709 * CMT: CUCv2 algorithm. From the
4710 * cumack'd TSNs, for each TSN being
4711 * acked for the first time, set the
4712 * following variables for the
4713 * corresp destination.
4714 * new_pseudo_cumack will trigger a
4715 * cwnd update.
4716 * find_(rtx_)pseudo_cumack will
4717 * trigger search for the next
4718 * expected (rtx-)pseudo-cumack.
4719 */
4720 tp1->whoTo->new_pseudo_cumack = 1;
4721 tp1->whoTo->find_pseudo_cumack = 1;
4722 tp1->whoTo->find_rtx_pseudo_cumack = 1;
tuexendd729232011-11-01 23:04:43 +00004723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4724 sctp_log_sack(asoc->last_acked_seq,
4725 cum_ack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004726 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004727 0,
4728 0,
4729 SCTP_LOG_TSN_ACKED);
4730 }
4731 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004732 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004733 }
4734 }
4735 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4736 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4737#ifdef SCTP_AUDITING_ENABLED
4738 sctp_audit_log(0xB3,
4739 (asoc->sent_queue_retran_cnt & 0x000000ff));
4740#endif
4741 }
4742 if (tp1->rec.data.chunk_was_revoked) {
4743 /* deflate the cwnd */
4744 tp1->whoTo->cwnd -= tp1->book_size;
4745 tp1->rec.data.chunk_was_revoked = 0;
4746 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004747 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4748 tp1->sent = SCTP_DATAGRAM_ACKED;
4749 }
tuexendd729232011-11-01 23:04:43 +00004750 }
4751 } else {
4752 break;
4753 }
4754 }
4755 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4756 /* always set this up to cum-ack */
4757 asoc->this_sack_highest_gap = last_tsn;
4758
4759 if ((num_seg > 0) || (num_nr_seg > 0)) {
tuexendd729232011-11-01 23:04:43 +00004760 /*
tuexendd729232011-11-01 23:04:43 +00004761 * thisSackHighestGap will increase while handling NEW
4762 * segments this_sack_highest_newack will increase while
4763 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4764 * used for CMT DAC algo. saw_newack will also change.
4765 */
4766 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4767 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
tuexen9784e9a2011-12-18 13:04:23 +00004768 num_seg, num_nr_seg, &rto_ok)) {
tuexendd729232011-11-01 23:04:43 +00004769 wake_him++;
4770 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004771 /*
4772 * validate the biggest_tsn_acked in the gap acks if
4773 * strict adherence is wanted.
4774 */
4775 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
tuexendd729232011-11-01 23:04:43 +00004776 /*
Michael Tuexen0ec21502016-05-12 18:39:01 +02004777 * peer is either confused or we are under
4778 * attack. We must abort.
tuexendd729232011-11-01 23:04:43 +00004779 */
Michael Tuexen0ec21502016-05-12 18:39:01 +02004780 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4781 biggest_tsn_acked, send_s);
4782 goto hopeless_peer;
tuexendd729232011-11-01 23:04:43 +00004783 }
4784 }
4785 /*******************************************/
4786 /* cancel ALL T3-send timer if accum moved */
4787 /*******************************************/
4788 if (asoc->sctp_cmt_on_off > 0) {
4789 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4790 if (net->new_pseudo_cumack)
4791 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4792 stcb, net,
Michael Tuexen555c8e82020-07-23 03:43:26 +02004793 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
tuexendd729232011-11-01 23:04:43 +00004794 }
4795 } else {
4796 if (accum_moved) {
4797 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4798 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
Michael Tuexen555c8e82020-07-23 03:43:26 +02004799 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
tuexendd729232011-11-01 23:04:43 +00004800 }
4801 }
4802 }
4803 /********************************************/
4804 /* drop the acked chunks from the sentqueue */
4805 /********************************************/
4806 asoc->last_acked_seq = cum_ack;
4807
4808 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004809 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
tuexendd729232011-11-01 23:04:43 +00004810 break;
4811 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004812 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004813 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4814 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
t00fcxen9ad90772012-11-07 22:19:57 +00004815#ifdef INVARIANTS
4816 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004817 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
t00fcxen9ad90772012-11-07 22:19:57 +00004818#endif
4819 }
tuexendd729232011-11-01 23:04:43 +00004820 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01004821 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4822 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4823 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
Michael Tuexenc0a12d12015-12-03 16:30:24 +01004824 asoc->trigger_reset = 1;
4825 }
tuexendd729232011-11-01 23:04:43 +00004826 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
t00fcxen91ceb732013-09-03 19:40:11 +00004827 if (PR_SCTP_ENABLED(tp1->flags)) {
tuexendd729232011-11-01 23:04:43 +00004828 if (asoc->pr_sctp_cnt != 0)
4829 asoc->pr_sctp_cnt--;
4830 }
4831 asoc->sent_queue_cnt--;
4832 if (tp1->data) {
4833 /* sa_ignore NO_NULL_CHK */
4834 sctp_free_bufspace(stcb, asoc, tp1, 1);
4835 sctp_m_freem(tp1->data);
4836 tp1->data = NULL;
t00fcxen0e78cef2014-08-02 22:05:33 +00004837 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
tuexendd729232011-11-01 23:04:43 +00004838 asoc->sent_queue_cnt_removeable--;
4839 }
4840 }
4841 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4842 sctp_log_sack(asoc->last_acked_seq,
4843 cum_ack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004844 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004845 0,
4846 0,
4847 SCTP_LOG_FREE_SENT);
4848 }
4849 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4850 wake_him++;
4851 }
4852 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4853#ifdef INVARIANTS
Michael Tuexen34488e72016-05-03 22:11:59 +02004854 panic("Warning flight size is positive and should be 0");
tuexendd729232011-11-01 23:04:43 +00004855#else
4856 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4857 asoc->total_flight);
4858#endif
4859 asoc->total_flight = 0;
4860 }
4861
tuexen98456cf2012-04-19 15:37:07 +00004862#if defined(__Userspace__)
4863 if (stcb->sctp_ep->recv_callback) {
4864 if (stcb->sctp_socket) {
4865 uint32_t inqueue_bytes, sb_free_now;
4866 struct sctp_inpcb *inp;
tuexen749d8562011-11-13 13:41:49 +00004867
tuexen98456cf2012-04-19 15:37:07 +00004868 inp = stcb->sctp_ep;
tuexen749d8562011-11-13 13:41:49 +00004869 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
tuexen1ee04c82012-04-19 16:35:13 +00004870 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
tuexen98456cf2012-04-19 15:37:07 +00004871
4872 /* check if the amount free in the send socket buffer crossed the threshold */
4873 if (inp->send_callback &&
4874 (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4875 (inp->send_sb_threshold == 0))) {
4876 atomic_add_int(&stcb->asoc.refcnt, 1);
4877 SCTP_TCB_UNLOCK(stcb);
José Luis Millán3df8f522020-08-01 12:34:40 +02004878 inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
tuexen98456cf2012-04-19 15:37:07 +00004879 SCTP_TCB_LOCK(stcb);
4880 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4881 }
tuexen749d8562011-11-13 13:41:49 +00004882 }
tuexen98456cf2012-04-19 15:37:07 +00004883 } else if ((wake_him) && (stcb->sctp_socket)) {
tuexen749d8562011-11-13 13:41:49 +00004884#else
tuexendd729232011-11-01 23:04:43 +00004885 /* sa_ignore NO_NULL_CHK */
4886 if ((wake_him) && (stcb->sctp_socket)) {
tuexen98456cf2012-04-19 15:37:07 +00004887#endif
Michael Tuexen5be0c252020-06-13 00:53:56 +02004888#if defined(__APPLE__) && !defined(__Userspace__)
tuexendd729232011-11-01 23:04:43 +00004889 struct socket *so;
4890
4891#endif
4892 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4893 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004894 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004895 }
Michael Tuexen5be0c252020-06-13 00:53:56 +02004896#if defined(__APPLE__) && !defined(__Userspace__)
tuexendd729232011-11-01 23:04:43 +00004897 so = SCTP_INP_SO(stcb->sctp_ep);
4898 atomic_add_int(&stcb->asoc.refcnt, 1);
4899 SCTP_TCB_UNLOCK(stcb);
4900 SCTP_SOCKET_LOCK(so, 1);
4901 SCTP_TCB_LOCK(stcb);
4902 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4903 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4904 /* assoc was freed while we were unlocked */
4905 SCTP_SOCKET_UNLOCK(so, 1);
4906 return;
4907 }
4908#endif
4909 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
Michael Tuexen5be0c252020-06-13 00:53:56 +02004910#if defined(__APPLE__) && !defined(__Userspace__)
tuexendd729232011-11-01 23:04:43 +00004911 SCTP_SOCKET_UNLOCK(so, 1);
4912#endif
4913 } else {
4914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004915 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004916 }
4917 }
tuexendd729232011-11-01 23:04:43 +00004918
4919 if (asoc->fast_retran_loss_recovery && accum_moved) {
4920 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4921 /* Setup so we will exit RFC2582 fast recovery */
4922 will_exit_fast_recovery = 1;
4923 }
4924 }
4925 /*
4926 * Check for revoked fragments:
4927 *
4928 * if Previous sack - Had no frags then we can't have any revoked if
4929 * Previous sack - Had frag's then - If we now have frags aka
4930 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4931 * some of them. else - The peer revoked all ACKED fragments, since
4932 * we had some before and now we have NONE.
4933 */
4934
4935 if (num_seg) {
4936 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4937 asoc->saw_sack_with_frags = 1;
4938 } else if (asoc->saw_sack_with_frags) {
4939 int cnt_revoked = 0;
4940
4941 /* Peer revoked all dg's marked or acked */
4942 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4943 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4944 tp1->sent = SCTP_DATAGRAM_SENT;
4945 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4946 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4947 tp1->whoTo->flight_size,
4948 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004949 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004950 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004951 }
4952 sctp_flight_size_increase(tp1);
4953 sctp_total_flight_increase(stcb, tp1);
4954 tp1->rec.data.chunk_was_revoked = 1;
4955 /*
4956 * To ensure that this increase in
4957 * flightsize, which is artificial,
4958 * does not throttle the sender, we
4959 * also increase the cwnd
4960 * artificially.
4961 */
4962 tp1->whoTo->cwnd += tp1->book_size;
4963 cnt_revoked++;
4964 }
4965 }
4966 if (cnt_revoked) {
4967 reneged_all = 1;
4968 }
4969 asoc->saw_sack_with_frags = 0;
4970 }
4971 if (num_nr_seg > 0)
4972 asoc->saw_sack_with_nr_frags = 1;
4973 else
4974 asoc->saw_sack_with_nr_frags = 0;
4975
4976 /* JRS - Use the congestion control given in the CC module */
4977 if (ecne_seen == 0) {
4978 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4979 if (net->net_ack2 > 0) {
4980 /*
4981 * Karn's rule applies to clearing error count, this
4982 * is optional.
4983 */
4984 net->error_count = 0;
Michael Tuexen8d6935d2022-05-15 01:51:24 +02004985 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
tuexendd729232011-11-01 23:04:43 +00004986 /* addr came good */
4987 net->dest_state |= SCTP_ADDR_REACHABLE;
4988 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
tuexenda53ff02012-05-14 09:00:59 +00004989 0, (void *)net, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00004990 }
4991
4992 if (net == stcb->asoc.primary_destination) {
4993 if (stcb->asoc.alternate) {
4994 /* release the alternate, primary is good */
4995 sctp_free_remote_addr(stcb->asoc.alternate);
4996 stcb->asoc.alternate = NULL;
4997 }
4998 }
4999
5000 if (net->dest_state & SCTP_ADDR_PF) {
5001 net->dest_state &= ~SCTP_ADDR_PF;
t00fcxen0057a6d2015-05-28 16:42:49 +00005002 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5003 stcb->sctp_ep, stcb, net,
Michael Tuexen555c8e82020-07-23 03:43:26 +02005004 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
tuexendd729232011-11-01 23:04:43 +00005005 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5006 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5007 /* Done with this net */
5008 net->net_ack = 0;
5009 }
5010 /* restore any doubled timers */
5011 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5012 if (net->RTO < stcb->asoc.minrto) {
5013 net->RTO = stcb->asoc.minrto;
5014 }
5015 if (net->RTO > stcb->asoc.maxrto) {
5016 net->RTO = stcb->asoc.maxrto;
5017 }
5018 }
5019 }
5020 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5021 }
5022
5023 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5024 /* nothing left in-flight */
5025 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5026 /* stop all timers */
5027 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
t00fcxen0057a6d2015-05-28 16:42:49 +00005028 stcb, net,
Michael Tuexen555c8e82020-07-23 03:43:26 +02005029 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
tuexendd729232011-11-01 23:04:43 +00005030 net->flight_size = 0;
5031 net->partial_bytes_acked = 0;
5032 }
5033 asoc->total_flight = 0;
5034 asoc->total_flight_count = 0;
5035 }
5036
5037 /**********************************/
5038 /* Now what about shutdown issues */
5039 /**********************************/
5040 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5041 /* nothing left on sendqueue.. consider done */
5042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5043 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5044 asoc->peers_rwnd, 0, 0, a_rwnd);
5045 }
5046 asoc->peers_rwnd = a_rwnd;
5047 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5048 /* SWS sender side engages */
5049 asoc->peers_rwnd = 0;
5050 }
5051 /* clean up */
5052 if ((asoc->stream_queue_cnt == 1) &&
5053 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02005054 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005055 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
Michael Tuexen348a36c2018-08-13 16:24:47 +02005056 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
tuexendd729232011-11-01 23:04:43 +00005057 }
Michael Tuexen74842cb2017-07-20 13:15:46 +02005058 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02005059 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexen74842cb2017-07-20 13:15:46 +02005060 (asoc->stream_queue_cnt == 1) &&
5061 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5062 struct mbuf *op_err;
5063
5064 *abort_now = 1;
5065 /* XXX */
5066 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
Michael Tuexen555c8e82020-07-23 03:43:26 +02005067 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
Michael Tuexen1ade45c2021-07-09 23:32:42 +02005068 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
Michael Tuexen74842cb2017-07-20 13:15:46 +02005069 return;
5070 }
tuexendd729232011-11-01 23:04:43 +00005071 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5072 (asoc->stream_queue_cnt == 0)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02005073 struct sctp_nets *netp;
t00fcxen08f9ff92014-03-16 13:38:54 +00005074
Michael Tuexen348a36c2018-08-13 16:24:47 +02005075 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5076 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02005077 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
tuexendd729232011-11-01 23:04:43 +00005078 }
Michael Tuexen348a36c2018-08-13 16:24:47 +02005079 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
Michael Tuexen74842cb2017-07-20 13:15:46 +02005080 sctp_stop_timers_for_shutdown(stcb);
5081 if (asoc->alternate) {
5082 netp = asoc->alternate;
5083 } else {
5084 netp = asoc->primary_destination;
5085 }
5086 sctp_send_shutdown(stcb, netp);
5087 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5088 stcb->sctp_ep, stcb, netp);
5089 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
Michael Tuexend07a5f22020-03-19 23:34:46 +01005090 stcb->sctp_ep, stcb, NULL);
tuexendd729232011-11-01 23:04:43 +00005091 return;
Michael Tuexen348a36c2018-08-13 16:24:47 +02005092 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
tuexendd729232011-11-01 23:04:43 +00005093 (asoc->stream_queue_cnt == 0)) {
5094 struct sctp_nets *netp;
t00fcxend0ad16b2013-02-09 18:34:24 +00005095
tuexendd729232011-11-01 23:04:43 +00005096 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
Michael Tuexen348a36c2018-08-13 16:24:47 +02005097 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
tuexendd729232011-11-01 23:04:43 +00005098 sctp_stop_timers_for_shutdown(stcb);
t00fcxend0ad16b2013-02-09 18:34:24 +00005099 if (asoc->alternate) {
5100 netp = asoc->alternate;
5101 } else {
5102 netp = asoc->primary_destination;
5103 }
5104 sctp_send_shutdown_ack(stcb, netp);
tuexendd729232011-11-01 23:04:43 +00005105 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5106 stcb->sctp_ep, stcb, netp);
5107 return;
5108 }
5109 }
5110 /*
5111 * Now here we are going to recycle net_ack for a different use...
5112 * HEADS UP.
5113 */
5114 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5115 net->net_ack = 0;
5116 }
5117
5118 /*
5119 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5120 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5121 * automatically ensure that.
5122 */
5123 if ((asoc->sctp_cmt_on_off > 0) &&
5124 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5125 (cmt_dac_flag == 0)) {
5126 this_sack_lowest_newack = cum_ack;
5127 }
5128 if ((num_seg > 0) || (num_nr_seg > 0)) {
5129 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5130 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5131 }
5132 /* JRS - Use the congestion control given in the CC module */
5133 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5134
5135 /* Now are we exiting loss recovery ? */
5136 if (will_exit_fast_recovery) {
5137 /* Ok, we must exit fast recovery */
5138 asoc->fast_retran_loss_recovery = 0;
5139 }
5140 if ((asoc->sat_t3_loss_recovery) &&
5141 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5142 /* end satellite t3 loss recovery */
5143 asoc->sat_t3_loss_recovery = 0;
5144 }
5145 /*
5146 * CMT Fast recovery
5147 */
5148 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5149 if (net->will_exit_fast_recovery) {
5150 /* Ok, we must exit fast recovery */
5151 net->fast_retran_loss_recovery = 0;
5152 }
5153 }
5154
5155 /* Adjust and set the new rwnd value */
5156 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5157 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5158 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5159 }
5160 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5161 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5162 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5163 /* SWS sender side engages */
5164 asoc->peers_rwnd = 0;
5165 }
5166 if (asoc->peers_rwnd > old_rwnd) {
5167 win_probe_recovery = 1;
5168 }
5169
5170 /*
5171 * Now we must setup so we have a timer up for anyone with
5172 * outstanding data.
5173 */
5174 done_once = 0;
5175again:
5176 j = 0;
5177 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5178 if (win_probe_recovery && (net->window_probe)) {
5179 win_probe_recovered = 1;
5180 /*-
5181 * Find first chunk that was used with
5182 * window probe and clear the event. Put
5183 * it back into the send queue as if has
5184 * not been sent.
5185 */
5186 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5187 if (tp1->window_probe) {
tuexen9784e9a2011-12-18 13:04:23 +00005188 sctp_window_probe_recovery(stcb, asoc, tp1);
tuexendd729232011-11-01 23:04:43 +00005189 break;
5190 }
5191 }
5192 }
5193 if (net->flight_size) {
5194 j++;
5195 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5196 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5197 stcb->sctp_ep, stcb, net);
5198 }
5199 if (net->window_probe) {
5200 net->window_probe = 0;
5201 }
5202 } else {
5203 if (net->window_probe) {
5204 /* In window probes we must assure a timer is still running there */
5205 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5206 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5207 stcb->sctp_ep, stcb, net);
tuexendd729232011-11-01 23:04:43 +00005208 }
5209 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5210 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5211 stcb, net,
Michael Tuexen555c8e82020-07-23 03:43:26 +02005212 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
tuexendd729232011-11-01 23:04:43 +00005213 }
5214 }
5215 }
5216 if ((j == 0) &&
5217 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5218 (asoc->sent_queue_retran_cnt == 0) &&
5219 (win_probe_recovered == 0) &&
5220 (done_once == 0)) {
5221 /* huh, this should not happen unless all packets
5222 * are PR-SCTP and marked to skip of course.
5223 */
5224 if (sctp_fs_audit(asoc)) {
5225 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5226 net->flight_size = 0;
5227 }
5228 asoc->total_flight = 0;
5229 asoc->total_flight_count = 0;
5230 asoc->sent_queue_retran_cnt = 0;
5231 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5232 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5233 sctp_flight_size_increase(tp1);
5234 sctp_total_flight_increase(stcb, tp1);
5235 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5236 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5237 }
5238 }
5239 }
5240 done_once = 1;
5241 goto again;
5242 }
5243 /*********************************************/
5244 /* Here we perform PR-SCTP procedures */
5245 /* (section 4.2) */
5246 /*********************************************/
5247 /* C1. update advancedPeerAckPoint */
5248 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5249 asoc->advanced_peer_ack_point = cum_ack;
5250 }
5251 /* C2. try to further move advancedPeerAckPoint ahead */
t00fcxen0e78cef2014-08-02 22:05:33 +00005252 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
tuexendd729232011-11-01 23:04:43 +00005253 struct sctp_tmit_chunk *lchk;
5254 uint32_t old_adv_peer_ack_point;
tuexen15f99d82012-04-19 16:08:38 +00005255
tuexendd729232011-11-01 23:04:43 +00005256 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5257 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5258 /* C3. See if we need to send a Fwd-TSN */
5259 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5260 /*
5261 * ISSUE with ECN, see FWD-TSN processing.
5262 */
5263 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5264 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5265 0xee, cum_ack, asoc->advanced_peer_ack_point,
5266 old_adv_peer_ack_point);
5267 }
5268 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5269 send_forward_tsn(stcb, asoc);
5270 } else if (lchk) {
5271 /* try to FR fwd-tsn's that get lost too */
5272 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5273 send_forward_tsn(stcb, asoc);
5274 }
5275 }
5276 }
Michael Tuexena8f3d9d2020-05-10 19:35:08 +02005277 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5278 if (lchk->whoTo != NULL) {
5279 break;
5280 }
5281 }
5282 if (lchk != NULL) {
tuexendd729232011-11-01 23:04:43 +00005283 /* Assure a timer is up */
5284 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5285 stcb->sctp_ep, stcb, lchk->whoTo);
5286 }
5287 }
5288 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5289 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5290 a_rwnd,
5291 stcb->asoc.peers_rwnd,
5292 stcb->asoc.total_flight,
5293 stcb->asoc.total_output_queue_size);
5294 }
5295}
5296
5297void
tuexen9784e9a2011-12-18 13:04:23 +00005298sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
tuexendd729232011-11-01 23:04:43 +00005299{
5300 /* Copy cum-ack */
5301 uint32_t cum_ack, a_rwnd;
5302
5303 cum_ack = ntohl(cp->cumulative_tsn_ack);
5304 /* Arrange so a_rwnd does NOT change */
5305 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5306
5307 /* Now call the express sack handling */
5308 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5309}
5310
/*
 * Flush an inbound stream's reorder queue after the cumulative TSN has
 * been moved forward (PR-SCTP).  Two passes over strmin->inqueue:
 *
 *   1) Everything with a message id at or before last_mid_delivered is
 *      now deliverable regardless of gaps; push complete messages to
 *      the socket read queue and drop through to reassembly handling
 *      for partially received ones.
 *   2) Resume normal, strictly in-order delivery for the ids that
 *      follow, stopping at the first id that is not next in sequence.
 *
 * NOTE(review): callers appear to hold the read lock — every helper
 * here is invoked with SCTP_READ_LOCK_HELD; confirm at call sites.
 */
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *control, *ncontrol;
	struct sctp_association *asoc;
	uint32_t mid;	/* message id cursor shared by both passes */
	int need_reasm_check = 0;

	asoc = &stcb->asoc;
	mid = strmin->last_mid_delivered;
	/*
	 * First deliver anything prior to and including the stream no that
	 * came in.
	 */
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
			/* this is deliverable now */
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * Complete (unfragmented) message: unlink it
				 * from whichever stream queue it sits on.
				 */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
					/* Accounting underflow: panic under
					 * INVARIANTS, clamp otherwise. */
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv,
					    1, SCTP_READ_LOCK_HELD,
					    SCTP_SO_NOT_LOCKED);
				}
			} else {
				/* It's a fragmented message */
				if (control->first_frag_seen) {
					/*
					 * Make it so this is next to
					 * deliver, we restore later
					 */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			/* no more delivery now. */
			break;
		}
	}
	if (need_reasm_check) {
		int ret;
		/* Try to complete/deliver the partial message we stopped on. */
		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
			/* Restore the next to deliver unless we are ahead */
			strmin->last_mid_delivered = mid;
		}
		if (ret == 0) {
			/* Left the front Partial one on */
			return;
		}
		need_reasm_check = 0;
	}
	/*
	 * now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				}
				/* Advance the in-order cursor. */
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* It's a fragmented message */
				if (control->first_frag_seen) {
					/* Make it so this is next to deliver */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}
5450
/*
 * Purge the reassembly queue of 'control' for data the peer has
 * abandoned (PR-SCTP).  For pre-I-DATA unordered messages only chunks
 * with TSN <= cumtsn are dropped; in every other case all queued
 * fragments are freed.  If fragments beyond cumtsn survive, the
 * control is reset and reassembly is re-attempted; otherwise the
 * control is unlinked from its stream queue and, if it is not sitting
 * on the socket read queue, fully released.
 *
 * NOTE(review): helpers are called with SCTP_READ_LOCK_HELD, so the
 * read lock is presumably held by the caller — confirm at call sites.
 */
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc, struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/*
	 * For now large messages held on the stream reasm that are
	 * complete will be tossed too. We could in theory do more
	 * work to spin through and stop after dumping one msg aka
	 * seeing the start of a new msg at the head, and call the
	 * delivery function... to see if it can be delivered... But
	 * for now we just dump everything on the queue.
	 */
	if (!asoc->idata_supported && !ordered &&
	    control->first_frag_seen &&
	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		/*
		 * Old-style unordered message whose included fragments are
		 * entirely beyond the new cumulative TSN: nothing to purge.
		 */
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && !ordered) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				/* Chunks past cumtsn are still live; stop. */
				break;
			}
		}
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
			/* Accounting underflow: panic under INVARIANTS,
			 * clamp otherwise. */
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		/* Restart reassembly from the surviving fragments. */
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	/* All fragments gone: take the control off its stream queue. */
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	/*
	 * Clear unconditionally: without INVARIANTS an unexpected
	 * on_strm_q value falls through to here and is reset.
	 */
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		/* Not referenced by the socket read queue: free it. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}
5543
/*
 * Process a received FORWARD-TSN (PR-SCTP, RFC 3758) or I-FORWARD-TSN
 * (when I-DATA is supported, RFC 8260) chunk on the data receiver side.
 *
 * Parameters:
 *   stcb       - the TCB of the association the chunk arrived on; its
 *                TCB lock is expected to be held (asserted below).
 *   fwd        - pointer to the FORWARD-TSN chunk header (network order).
 *   abort_flag - out-parameter; set to 1 if the chunk is judged hostile
 *                and the association has been aborted.
 *   m, offset  - the mbuf chain holding the chunk and the offset of the
 *                chunk within it, used to walk the per-stream entries
 *                that follow the fixed chunk header.
 */
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
                        struct sctp_forward_tsn_chunk *fwd,
                        int *abort_flag, struct mbuf *m , int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required in by pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *ncontrol, *sv;

	asoc = &stcb->asoc;
	/* Reject chunks shorter than the fixed FORWARD-TSN header. */
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
			"Bad size too small/big fwd-tsn\n");
		return;
	}
	/* Number of TSNs the mapping array can represent (bytes * 8 bits). */
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		/*
		 * The new cum-TSN jumps past everything the mapping array
		 * can express.  If it also exceeds the receive-buffer space
		 * we advertised, no legitimate peer could have sent it.
		 */
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			SCTP_SNPRINTF(msg, sizeof(msg),
			              "New cum ack %8.8x too high, highest TSN %8.8x",
			              new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		/* Whole map is obsolete: restart both bitmaps at new_cum_tsn + 1. */
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * Gap fits inside the map: mark every TSN up to the new
		 * cum-TSN as received (in the nr-map) unless it was already
		 * marked in either map.
		 */
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			strm = &asoc->strmin[sid];
			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
			}
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                          */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		/*
		 * The chunk trailer is an array of per-stream entries; the
		 * entry layout differs between FORWARD-TSN (sid/ssn) and
		 * I-FORWARD-TSN (sid/flags/mid).
		 */
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				                                                  sizeof(struct sctp_strseq_mid),
				                                                  (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				                                            sizeof(struct sctp_strseq),
				                                            (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				/* Classic FORWARD-TSN carries a 16-bit SSN; widen it to the MID type. */
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/* Convert */

			/* now process */

			/*
			 * Ok we now look for the stream/seq on the read queue
			 * where its not all delivered. If we find it we transmute the
			 * read entry into a PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/* If this is the one we were partially delivering
				 * now then we no longer are. Note this will change
				 * with the reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			strm = &asoc->strmin[sid];
			/*
			 * Drop every queued message on this stream whose MID
			 * is covered by the forwarded point.  SAFE variant
			 * because the flush removes entries while we walk.
			 */
			if (ordered) {
				TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
					}
				}
			} else {
				if (asoc->idata_supported) {
					TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
						}
					}
				} else {
					/* Without I-DATA, unordered entries carry no usable MID; flush by TSN only. */
					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
					}
				}
			}
			/*
			 * If the skipped message is sitting (partially
			 * delivered) on the socket read queue, convert it
			 * into a PARTIAL_DELIVERY_ABORTED notification.
			 */
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					/* Pack stream id and (low 16 bits of) MID for the notification payload. */
					str_seq = (sid << 16) | (0x0000ffff & mid);
					control->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						/* Any other queue marker is a bookkeeping bug. */
						panic("strm: %p ctl: %p unknown %d",
						      strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					/*
					 * Temporarily point control_pdapi at this entry so the
					 * notification path sees it, then restore the saved value.
					 */
					stcb->asoc.control_pdapi = control;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					                stcb,
					                SCTP_PARTIAL_DELIVERY_ABORTED,
					                (void *)&str_seq,
					                SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((control->sinfo_stream == sid) &&
				           SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/*sa_ignore NO_NULL_CHK*/
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide thing forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}