blob: de18d7a0ebe28fbd79f5fb25406d5523064916b3 [file] [log] [blame]
tuexendd729232011-11-01 23:04:43 +00001/*-
Michael Tuexen866a7312017-11-24 12:44:05 +01002 * SPDX-License-Identifier: BSD-3-Clause
3 *
tuexendd729232011-11-01 23:04:43 +00004 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
tuexen194eae12012-05-23 12:03:48 +00005 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
tuexendd729232011-11-01 23:04:43 +00007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
tuexen9784e9a2011-12-18 13:04:23 +000012 * this list of conditions and the following disclaimer.
tuexendd729232011-11-01 23:04:43 +000013 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
tuexen9784e9a2011-12-18 13:04:23 +000016 * the documentation and/or other materials provided with the distribution.
tuexendd729232011-11-01 23:04:43 +000017 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
tuexendd729232011-11-01 23:04:43 +000035#ifdef __FreeBSD__
36#include <sys/cdefs.h>
Michael Tüxen703ca422019-07-22 14:13:53 -040037__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 350216 2019-07-22 18:11:35Z tuexen $");
tuexendd729232011-11-01 23:04:43 +000038#endif
39
40#include <netinet/sctp_os.h>
Michael Tuexene5001952016-04-17 19:25:27 +020041#ifdef __FreeBSD__
42#include <sys/proc.h>
43#endif
tuexendd729232011-11-01 23:04:43 +000044#include <netinet/sctp_var.h>
45#include <netinet/sctp_sysctl.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020046#include <netinet/sctp_header.h>
Michael Tuexene5001952016-04-17 19:25:27 +020047#include <netinet/sctp_pcb.h>
tuexendd729232011-11-01 23:04:43 +000048#include <netinet/sctputil.h>
49#include <netinet/sctp_output.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020050#include <netinet/sctp_uio.h>
Michael Tuexene5001952016-04-17 19:25:27 +020051#include <netinet/sctp_auth.h>
Michael Tuexen3121b802016-04-10 23:28:19 +020052#include <netinet/sctp_timer.h>
Michael Tuexene5001952016-04-17 19:25:27 +020053#include <netinet/sctp_asconf.h>
54#include <netinet/sctp_indata.h>
55#include <netinet/sctp_bsd_addr.h>
56#include <netinet/sctp_input.h>
57#include <netinet/sctp_crc32.h>
58#ifdef __FreeBSD__
59#include <netinet/sctp_lock_bsd.h>
60#endif
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is), and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +020070static uint32_t
Michael Tuexene5001952016-04-17 19:25:27 +020071sctp_add_chk_to_control(struct sctp_queued_to_read *control,
72 struct sctp_stream_in *strm,
73 struct sctp_tcb *stcb,
74 struct sctp_association *asoc,
Michael Tuexenfdcf7902016-08-06 14:39:31 +020075 struct sctp_tmit_chunk *chk, int lock_held);
Michael Tuexene5001952016-04-17 19:25:27 +020076
tuexendd729232011-11-01 23:04:43 +000077
78void
79sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
80{
81 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
82}
83
/*
 * Calculate what the rwnd would be.
 *
 * Computes the receive window to advertise to the peer: the free space in
 * the receive socket buffer, minus data still held on the reassembly and
 * per-stream queues (plus a per-chunk MSIZE overhead charge for each held
 * chunk) and minus the accumulated control-chunk overhead.  Returns 0 when
 * out of space, and clamps to 1 (never 0) when only control overhead
 * remains, to keep silly-window-syndrome avoidance engaged.
 */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		/* No socket: nothing to advertise. */
		return (calc);
	}

	/* Queue byte counts must be zero whenever the chunk counts are. */
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	        ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	                                         asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	                                         asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/* If the window gets too small due to ctrl-stuff, reduce it
	 * to 1, even it is 0. SWS engaged
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
136
137
138
139/*
140 * Build out our readq entry based on the incoming packet.
141 */
142struct sctp_queued_to_read *
143sctp_build_readq_entry(struct sctp_tcb *stcb,
144 struct sctp_nets *net,
145 uint32_t tsn, uint32_t ppid,
Michael Tuexen00657ac2016-12-07 21:53:26 +0100146 uint32_t context, uint16_t sid,
147 uint32_t mid, uint8_t flags,
tuexendd729232011-11-01 23:04:43 +0000148 struct mbuf *dm)
149{
150 struct sctp_queued_to_read *read_queue_e = NULL;
151
152 sctp_alloc_a_readq(stcb, read_queue_e);
153 if (read_queue_e == NULL) {
154 goto failed_build;
155 }
Michael Tuexene5001952016-04-17 19:25:27 +0200156 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
Michael Tuexen00657ac2016-12-07 21:53:26 +0100157 read_queue_e->sinfo_stream = sid;
tuexendd729232011-11-01 23:04:43 +0000158 read_queue_e->sinfo_flags = (flags << 8);
159 read_queue_e->sinfo_ppid = ppid;
tuexen9784e9a2011-12-18 13:04:23 +0000160 read_queue_e->sinfo_context = context;
tuexendd729232011-11-01 23:04:43 +0000161 read_queue_e->sinfo_tsn = tsn;
162 read_queue_e->sinfo_cumtsn = tsn;
163 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
Michael Tuexen00657ac2016-12-07 21:53:26 +0100164 read_queue_e->mid = mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200165 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
166 TAILQ_INIT(&read_queue_e->reasm);
tuexendd729232011-11-01 23:04:43 +0000167 read_queue_e->whoFrom = net;
tuexendd729232011-11-01 23:04:43 +0000168 atomic_add_int(&net->ref_count, 1);
169 read_queue_e->data = dm;
tuexendd729232011-11-01 23:04:43 +0000170 read_queue_e->stcb = stcb;
171 read_queue_e->port_from = stcb->rport;
tuexendd729232011-11-01 23:04:43 +0000172failed_build:
173 return (read_queue_e);
174}
175
/*
 * Build the ancillary-data (cmsg) mbuf delivered alongside a received
 * message.  Depending on which socket options are enabled on the endpoint
 * it packs, in order: SCTP_RCVINFO, SCTP_NXTINFO (only if next-message
 * info is available), and SCTP_SNDRCV or SCTP_EXTRCV.  Returns NULL when
 * the user wants no ancillary data or when no mbuf could be allocated.
 */
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(__Userspace_os_Windows)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	/* First pass: compute the total cmsg space needed. */
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(__Userspace_os_Windows)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	/*
	 * Make sure that there is no un-initialized padding between
	 * the cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	/* Second pass: fill each enabled cmsg, advancing cmh as we go. */
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		/* Translate the extended-rcvinfo flag bits into nxtinfo flags. */
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
305
306
/*
 * Move a TSN from the (revokable) mapping array into the non-revokable
 * (nr_mapping) array once it has been delivered and may no longer be
 * renege'd.  Only active when the sctp_do_drain sysctl is enabled.
 * Also maintains highest_tsn_inside_map / highest_tsn_inside_nr_map.
 */
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r=0, in_nr=0;
	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		/* Draining disabled: the split maps are not maintained. */
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/* this tsn is behind the cum ack and thus we don't
		 * need to worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
		/* TSN above the cum-ack but in neither map: inconsistent state. */
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	/* Set in the nr map, clear from the revokable map. */
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			/* Revokable map is now empty below the old highest. */
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
356
/*
 * Insert a control (message) into the proper per-stream queue, keeping the
 * queue sorted by MID.  Unordered messages go to the unordered queue; with
 * old-style (non I-DATA) negotiation only a single unordered message may be
 * pending at a time.  Returns 0 on success, -1 on a protocol violation
 * (queue conflict or duplicate MID) — caller must abort the association.
 */
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one stream can be here in old style -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		/* Complete (unfragmented) message: no reassembly needed. */
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		/* Walk the queue to find the MID-sorted insertion point. */
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the
				 * new one, insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED ;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, He sent me a duplicate msg
				 * id number?? return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert
					 * it after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						                  SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED ;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED ;
					}
					break;
				}
			}
		}
	}
	return (0);
}
439
440static void
441sctp_abort_in_reasm(struct sctp_tcb *stcb,
Michael Tuexene5001952016-04-17 19:25:27 +0200442 struct sctp_queued_to_read *control,
443 struct sctp_tmit_chunk *chk,
444 int *abort_flag, int opspot)
445{
446 char msg[SCTP_DIAG_INFO_LEN];
447 struct mbuf *oper;
Michael Tuexena9d8c472016-04-18 22:22:59 +0200448
Michael Tuexene5001952016-04-17 19:25:27 +0200449 if (stcb->asoc.idata_supported) {
450 snprintf(msg, sizeof(msg),
451 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
452 opspot,
453 control->fsn_included,
Michael Tuexen00657ac2016-12-07 21:53:26 +0100454 chk->rec.data.tsn,
455 chk->rec.data.sid,
456 chk->rec.data.fsn, chk->rec.data.mid);
Michael Tuexene5001952016-04-17 19:25:27 +0200457 } else {
458 snprintf(msg, sizeof(msg),
Michael Tuexena9d8c472016-04-18 22:22:59 +0200459 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
Michael Tuexene5001952016-04-17 19:25:27 +0200460 opspot,
461 control->fsn_included,
Michael Tuexen00657ac2016-12-07 21:53:26 +0100462 chk->rec.data.tsn,
463 chk->rec.data.sid,
464 chk->rec.data.fsn,
465 (uint16_t)chk->rec.data.mid);
Michael Tuexene5001952016-04-17 19:25:27 +0200466 }
467 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
468 sctp_m_freem(chk->data);
469 chk->data = NULL;
470 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
471 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
472 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
473 *abort_flag = 1;
474}
475
476static void
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200477sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
Michael Tuexene5001952016-04-17 19:25:27 +0200478{
479 /*
480 * The control could not be placed and must be cleaned.
481 */
482 struct sctp_tmit_chunk *chk, *nchk;
483 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
484 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
485 if (chk->data)
486 sctp_m_freem(chk->data);
487 chk->data = NULL;
488 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
489 }
490 sctp_free_a_readq(stcb, control);
tuexendd729232011-11-01 23:04:43 +0000491}
492
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the control's entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybred approach is the answer
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		        strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			         strm->last_mid_delivered, control->sinfo_tsn,
			         control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			         (uint16_t)strm->last_mid_delivered,
			         control->sinfo_tsn,
			         control->sinfo_stream,
			         (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	/* Tentatively account for the message on the stream queues. */
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Lock-order dance: drop TCB lock, take socket lock, retake. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		/* Undo the tentative accounting: message goes to the read queue. */
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		                  control,
		                  &stcb->sctp_socket->so_rcv, 1,
		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		/* Drain any further in-order, complete messages from the queue. */
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					      control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					                  SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, 1,
				                  SCTP_READ_LOCK_NOT_HELD,
				                  SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				/* Next in order but fragmented: needs reassembly. */
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate MID: clean up and abort the association. */
			snprintf(msg, sizeof(msg),
			         "Queue to str MID: %u duplicate",
			         control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
669
Michael Tuexen3121b802016-04-10 23:28:19 +0200670
Michael Tuexene5001952016-04-17 19:25:27 +0200671static void
672sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
673{
674 struct mbuf *m, *prev = NULL;
675 struct sctp_tcb *stcb;
676
677 stcb = control->stcb;
678 control->held_length = 0;
679 control->length = 0;
680 m = control->data;
681 while (m) {
682 if (SCTP_BUF_LEN(m) == 0) {
683 /* Skip mbufs with NO length */
684 if (prev == NULL) {
685 /* First one */
686 control->data = sctp_m_free(m);
687 m = control->data;
688 } else {
689 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
690 m = SCTP_BUF_NEXT(prev);
691 }
692 if (m == NULL) {
693 control->tail_mbuf = prev;
694 }
695 continue;
Michael Tuexen3121b802016-04-10 23:28:19 +0200696 }
Michael Tuexene5001952016-04-17 19:25:27 +0200697 prev = m;
698 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
699 if (control->on_read_q) {
700 /*
701 * On read queue so we must increment the
702 * SB stuff, we assume caller has done any locks of SB.
703 */
704 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
Michael Tuexen3121b802016-04-10 23:28:19 +0200705 }
Michael Tuexene5001952016-04-17 19:25:27 +0200706 m = SCTP_BUF_NEXT(m);
Michael Tuexen3121b802016-04-10 23:28:19 +0200707 }
Michael Tuexene5001952016-04-17 19:25:27 +0200708 if (prev) {
709 control->tail_mbuf = prev;
710 }
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200711}
712
713static void
Michael Tuexenbe5e3e72017-07-19 14:44:48 +0200714sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200715{
Michael Tuexene5001952016-04-17 19:25:27 +0200716 struct mbuf *prev=NULL;
717 struct sctp_tcb *stcb;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200718
Michael Tuexene5001952016-04-17 19:25:27 +0200719 stcb = control->stcb;
720 if (stcb == NULL) {
Michael Tuexenf6d20c52016-04-18 11:31:05 +0200721#ifdef INVARIANTS
Michael Tuexene5001952016-04-17 19:25:27 +0200722 panic("Control broken");
Michael Tuexenf6d20c52016-04-18 11:31:05 +0200723#else
724 return;
725#endif
Michael Tuexene5001952016-04-17 19:25:27 +0200726 }
727 if (control->tail_mbuf == NULL) {
728 /* TSNH */
729 control->data = m;
730 sctp_setup_tail_pointer(control);
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200731 return;
732 }
Michael Tuexene5001952016-04-17 19:25:27 +0200733 control->tail_mbuf->m_next = m;
734 while (m) {
735 if (SCTP_BUF_LEN(m) == 0) {
736 /* Skip mbufs with NO length */
737 if (prev == NULL) {
738 /* First one */
739 control->tail_mbuf->m_next = sctp_m_free(m);
740 m = control->tail_mbuf->m_next;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200741 } else {
Michael Tuexene5001952016-04-17 19:25:27 +0200742 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
743 m = SCTP_BUF_NEXT(prev);
Michael Tuexen3121b802016-04-10 23:28:19 +0200744 }
Michael Tuexene5001952016-04-17 19:25:27 +0200745 if (m == NULL) {
746 control->tail_mbuf = prev;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200747 }
Michael Tuexene5001952016-04-17 19:25:27 +0200748 continue;
Michael Tuexen48ebe5e2016-04-10 23:10:14 +0200749 }
Michael Tuexene5001952016-04-17 19:25:27 +0200750 prev = m;
751 if (control->on_read_q) {
752 /*
753 * On read queue so we must increment the
754 * SB stuff, we assume caller has done any locks of SB.
Michael Tuexen3121b802016-04-10 23:28:19 +0200755 */
Michael Tuexene5001952016-04-17 19:25:27 +0200756 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
757 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +0200758 *added += SCTP_BUF_LEN(m);
Michael Tuexene5001952016-04-17 19:25:27 +0200759 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
760 m = SCTP_BUF_NEXT(m);
761 }
762 if (prev) {
763 control->tail_mbuf = prev;
764 }
765}
766
767static void
768sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
769{
770 memset(nc, 0, sizeof(struct sctp_queued_to_read));
771 nc->sinfo_stream = control->sinfo_stream;
Michael Tuexen00657ac2016-12-07 21:53:26 +0100772 nc->mid = control->mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200773 TAILQ_INIT(&nc->reasm);
774 nc->top_fsn = control->top_fsn;
Michael Tuexen00657ac2016-12-07 21:53:26 +0100775 nc->mid = control->mid;
Michael Tuexene5001952016-04-17 19:25:27 +0200776 nc->sinfo_flags = control->sinfo_flags;
777 nc->sinfo_ppid = control->sinfo_ppid;
778 nc->sinfo_context = control->sinfo_context;
779 nc->fsn_included = 0xffffffff;
780 nc->sinfo_tsn = control->sinfo_tsn;
781 nc->sinfo_cumtsn = control->sinfo_cumtsn;
782 nc->sinfo_assoc_id = control->sinfo_assoc_id;
783 nc->whoFrom = control->whoFrom;
784 atomic_add_int(&nc->whoFrom->ref_count, 1);
785 nc->stcb = control->stcb;
786 nc->port_from = control->port_from;
787}
788
Michael Tuexenfdcf7902016-08-06 14:39:31 +0200789static void
790sctp_reset_a_control(struct sctp_queued_to_read *control,
791 struct sctp_inpcb *inp, uint32_t tsn)
792{
793 control->fsn_included = tsn;
794 if (control->on_read_q) {
795 /*
796 * We have to purge it from there,
797 * hopefully this will work :-)
798 */
799 TAILQ_REMOVE(&inp->read_queue, control, next);
800 control->on_read_q = 0;
801 }
802}
803
/*
 * Deliver/collapse logic for pre-I-DATA (RFC 4960 style) unordered
 * fragmented messages, where all fragments land on a single control
 * (MID 0).  Merges consecutive FSNs from the reassembly queue onto the
 * control; when a message completes with fragments left over, those are
 * moved to a freshly allocated control ("nc") for the following message
 * and processing restarts on it.
 *
 * Returns 0 when a partial delivery (PD-API) was started here, 1 in all
 * other cases; a return of 1 tells the caller to stop scanning further
 * controls on the unordered queue.
 */
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/* Special handling for the old un-ordered data chunk.
	 * All the chunks/TSN's go to mid 0. So
	 * we have to do the old style watching to see
	 * if we have it all. If you return one, no other
	 * control entries on the un-ordered queue will
	 * be looked at. In theory there should be no others
	 * entries in reality, unless the guy is sending both
	 * unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	/* Next fragment we can merge is the one right after fsn_included. */
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			/* Pre-allocate nc; it is only populated if this merge completes the message. */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			/*
			 * NOTE(review): passes SCTP_READ_LOCK_NOT_HELD rather
			 * than inp_read_lock_held -- confirm this is intended.
			 */
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left on
					 * the control queue to a new control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						/* Leftovers start a new message: seed nc with its first fragment. */
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Now lets add it to the queue after removing control */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				/* Message complete: clear any in-progress PD-API state. */
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					/* Userland stack: notify the application of newly merged data. */
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					/* nc unused (or parked on the stream queue): release if not queued. */
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				/* Message not yet complete; the pre-allocated nc is not needed. */
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		/* We grew a control that is being partially delivered: wake the reader. */
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		/* Enough data buffered to start a partial delivery to the socket. */
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
949
/*
 * Insert chunk chk (old/unordered, pre-I-DATA format) into control's
 * reassembly state, keeping the reasm queue sorted by FSN.  A FIRST
 * fragment may become the control's included data directly, or may be
 * swapped with an already-placed first fragment that has a larger FSN.
 * On protocol violations (duplicate FSN, or an unrecoverable overlap
 * once a PD-API has started) the association is aborted via
 * sctp_abort_in_reasm() and *abort_flag is set.
 */
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;
	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is
			 * a smaller TSN than this one, even though
			 * this has a first, it must be from a subsequent
			 * msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble
			 * multiple messages on one control. As long
			 * as the next FIRST is greater than the old
			 * first (TSN i.e. FSN wise)
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/* Easy way the start of a new guy beyond the lowest */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does
				 * we started the pd-api on the higher TSN (since
				 * the equals part is a TSN failure it must be that).
				 *
				 * We are completely hosed in that case since I have
				 * no way to recover. This really will only happen
				 * if we can get more TSN's higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got
			 * is smaller than the one we previously placed.. yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			/* The demoted old first (now in chk) is queued like any other fragment. */
			goto place_chunk;
		}
		/* First FIRST fragment seen: its data becomes the control's data. */
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	/* Insert chk into the reasm queue, ordered by ascending FSN. */
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one, insert
			 * the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This
			 * really should not happen since the FSN is
			 * a TSN and it should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}

	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
1083
/*
 * Scan stream strm for reassembled messages that can be handed to the
 * socket: first the unordered queue, then in-MID-order delivery from
 * the ordered queue.  May initiate a partial delivery (PD-API) when a
 * still-incomplete message has at least pd_point bytes buffered.
 * Returns the number of completed ordered messages taken off the
 * stream queue, or 0 when further delivery is blocked by an
 * in-progress PD-API.
 */
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of
	 * the SSN's on it that are fragmented
	 * are ready to deliver. If so go ahead
	 * and place them on the read queue. In
	 * so placing if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	/* PD-API threshold: fraction of the receive buffer, capped by the endpoint setting. */
	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
			       stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	/* I-DATA path: walk the unordered queue, delivering what is complete. */
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED ) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	/* Now the ordered queue. */
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/* Ok the guy at the top was being partially delivered
		 * completed, so we remove it. Note
		 * the pd_api flag was taken off when the
		 * chunk was merged on in sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
			control, control->end_added, control->mid,
			control->top_fsn, control->fsn_included,
			strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED ) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more must have gotten an un-ordered above being partially delivered. */
		return (0);
	}
deliver_more:
	/* Deliver ordered messages in strictly increasing MID order. */
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
			next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED ) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						      control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* A singleton now slipping through - mark it non-revokable too */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/* Check if we can defer adding until its all there */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/* Don't need it or cannot add more (one being delivered that way) */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					/* Starting a PD-API: the bytes move to the socket's accounting. */
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				/* Fully delivered: try the next MID on the queue. */
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
1296
Michael Tuexenfdcf7902016-08-06 14:39:31 +02001297
/*
 * Merge the data of chunk chk onto control and release the chunk.
 * Updates fsn_included and the reassembly-queue accounting; a FIRST
 * fragment seeds the control's sinfo TSN/PPID, a LAST fragment marks
 * the message complete and pulls the control off the stream queue when
 * it is already visible on the read queue.  Takes the INP read lock
 * itself when the control is on the read queue and hold_rlock says the
 * caller does not already hold it.  Returns the number of bytes newly
 * appended to an existing data chain (0 when chk's data became the
 * control's first data).
 */
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the
	 * data from the chk onto the control and free
	 * up the chunk resources.
	 */
	uint32_t added=0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * Its being pd-api'd so we must
		 * do some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	/* The chunk's bytes leave the reassembly queue accounting. */
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement size_on_all_streams,
				 * since control is on the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				      control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
1372
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
/*
 * Queue one DATA/I-DATA fragment (chk) for the message identified by
 * control, then try to merge any now-in-sequence fragments into control
 * and, if the message becomes complete during a partial delivery, push
 * it to the read queue.
 *
 * Parameters:
 *   stcb/asoc       - TCB and association owning the stream.
 *   control         - per-message reassembly entry for this fragment.
 *   chk             - the inbound fragment being queued (consumed on the
 *                     first-fragment path; otherwise linked into
 *                     control->reasm or aborted).
 *   created_control - non-zero when control was freshly allocated by the
 *                     caller and must still be placed in the stream.
 *   abort_flag      - set by sctp_abort_in_reasm() on protocol violations.
 *   tsn             - TSN of this fragment (used for cum-ack sanity check).
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
			  struct sctp_queued_to_read *control,
			  struct sctp_tmit_chunk *chk,
			  int created_control,
			  int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 * NOTE(review): the upper byte of sinfo_flags appears to carry the
	 * chunk's rcv_flags (see SCTP_DATA_UNORDERED test) -- convention
	 * established elsewhere in this file.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		/* Unordered entries are not counted in cnt_on_all_streams. */
		if (unordered == 0) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			/* We created control above, so we must clean it up. */
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/* Ok we created this control and now
			 * lets validate that its legal i.e. there
			 * is a B bit set, if not and we have
			 * up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	/* Old (non-I-DATA) unordered fragments take a separate path. */
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reasembly portion:
	 * o if its the first it goes to the control mbuf.
	 * o if its not first but the next in sequence it goes to the control,
	 *   and each succeeding one in order also goes.
	 * o if its not in order we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either
			 * sent us two data chunks with FIRST,
			 * or they sent two un-ordered chunks that
			 * were fragmented at the same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		/*
		 * First fragment: its data becomes control's mbuf chain and
		 * the chunk shell is released back to the free list.
		 */
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted=0;
		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"We have a new top_fsn: %u\n",
					chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"The last fsn is now in place fsn: %u\n",
					chk->rec.data.fsn);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate last fsn: %u (top: %u) -- abort\n",
					chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
						chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/* validate not beyond top FSN if we have seen last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
					chk->rec.data.fsn,
					control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the
		 * new chunk in the reassembly for this
		 * control.  The reasm list is kept sorted by FSN
		 * (insert-before-first-larger, else tail).
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a not first fsn: %u needs to be inserted\n",
			chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new one, insert
				 * the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Insert it before fsn: %u\n",
					at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/* Gak, He sent me a duplicate str seq number */
				/*
				 * foo bar, I guess I will just free this new guy,
				 * should we abort too? FIX ME MAYBE? Or it COULD be
				 * that the SSN's have wrapped. Maybe I should
				 * compare to TSN somehow... sigh for now just blow
				 * away the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate to fsn: %u -- abort\n",
					at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
				chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control
	 * structure that are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been
	 * seen there is no sense in looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		/* SAFE variant: sctp_add_chk_to_control() frees the entry. */
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
					control, at,
					at->rec.data.fsn,
					next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the size-on-all-streams
					 * if its not on the read q. The read q
					 * flag will cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					/* Message complete: stop any PD-API in progress. */
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				/* Gap in FSN sequence; nothing more to merge. */
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
1648
Michael Tuexene5001952016-04-17 19:25:27 +02001649static struct sctp_queued_to_read *
Michael Tuexen00657ac2016-12-07 21:53:26 +01001650sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
tuexendd729232011-11-01 23:04:43 +00001651{
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001652 struct sctp_queued_to_read *control;
1653
Michael Tuexene5001952016-04-17 19:25:27 +02001654 if (ordered) {
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001655 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001656 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
Michael Tuexene5001952016-04-17 19:25:27 +02001657 break;
tuexendd729232011-11-01 23:04:43 +00001658 }
Michael Tuexene5001952016-04-17 19:25:27 +02001659 }
1660 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001661 if (idata_supported) {
1662 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1663 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1664 break;
1665 }
tuexendd729232011-11-01 23:04:43 +00001666 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01001667 } else {
1668 control = TAILQ_FIRST(&strm->uno_inqueue);
tuexendd729232011-11-01 23:04:43 +00001669 }
1670 }
Michael Tuexenb0298bf2016-04-27 21:01:33 +02001671 return (control);
tuexendd729232011-11-01 23:04:43 +00001672}
1673
tuexendd729232011-11-01 23:04:43 +00001674static int
1675sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
Michael Tuexene5001952016-04-17 19:25:27 +02001676 struct mbuf **m, int offset, int chk_length,
1677 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001678 int *break_flag, int last_chunk, uint8_t chk_type)
tuexendd729232011-11-01 23:04:43 +00001679{
Michael Tuexen022ef442018-05-21 17:04:54 +02001680 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001681 uint32_t tsn, fsn, gap, mid;
tuexendd729232011-11-01 23:04:43 +00001682 struct mbuf *dmbuf;
tuexen9784e9a2011-12-18 13:04:23 +00001683 int the_len;
tuexendd729232011-11-01 23:04:43 +00001684 int need_reasm_check = 0;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001685 uint16_t sid;
t00fcxen08f9ff92014-03-16 13:38:54 +00001686 struct mbuf *op_err;
1687 char msg[SCTP_DIAG_INFO_LEN];
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02001688 struct sctp_queued_to_read *control, *ncontrol;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001689 uint32_t ppid;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001690 uint8_t chk_flags;
tuexendd729232011-11-01 23:04:43 +00001691 struct sctp_stream_reset_list *liste;
Michael Tuexene5001952016-04-17 19:25:27 +02001692 int ordered;
1693 size_t clen;
1694 int created_control = 0;
tuexendd729232011-11-01 23:04:43 +00001695
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001696 if (chk_type == SCTP_IDATA) {
1697 struct sctp_idata_chunk *chunk, chunk_buf;
1698
1699 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1700 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1701 chk_flags = chunk->ch.chunk_flags;
Michael Tuexene5001952016-04-17 19:25:27 +02001702 clen = sizeof(struct sctp_idata_chunk);
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001703 tsn = ntohl(chunk->dp.tsn);
1704 sid = ntohs(chunk->dp.sid);
1705 mid = ntohl(chunk->dp.mid);
1706 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02001707 fsn = 0;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001708 ppid = chunk->dp.ppid_fsn.ppid;
1709 } else {
1710 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1711 ppid = 0xffffffff; /* Use as an invalid value. */
1712 }
Michael Tuexene5001952016-04-17 19:25:27 +02001713 } else {
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001714 struct sctp_data_chunk *chunk, chunk_buf;
1715
1716 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1717 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1718 chk_flags = chunk->ch.chunk_flags;
Michael Tuexene5001952016-04-17 19:25:27 +02001719 clen = sizeof(struct sctp_data_chunk);
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001720 tsn = ntohl(chunk->dp.tsn);
1721 sid = ntohs(chunk->dp.sid);
1722 mid = (uint32_t)(ntohs(chunk->dp.ssn));
Michael Tuexene5001952016-04-17 19:25:27 +02001723 fsn = tsn;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001724 ppid = chunk->dp.ppid;
Michael Tuexene5001952016-04-17 19:25:27 +02001725 }
Michael Tuexene5001952016-04-17 19:25:27 +02001726 if ((size_t)chk_length == clen) {
1727 /*
1728 * Need to send an abort since we had a
1729 * empty data chunk.
1730 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001731 op_err = sctp_generate_no_user_data_cause(tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001732 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1733 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1734 *abort_flag = 1;
1735 return (0);
1736 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001737 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
tuexendd729232011-11-01 23:04:43 +00001738 asoc->send_sack = 1;
1739 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001740 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
tuexendd729232011-11-01 23:04:43 +00001741 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1742 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1743 }
1744 if (stcb == NULL) {
1745 return (0);
1746 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001747 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
tuexendd729232011-11-01 23:04:43 +00001748 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1749 /* It is a duplicate */
1750 SCTP_STAT_INCR(sctps_recvdupdata);
1751 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1752 /* Record a dup for the next outbound sack */
1753 asoc->dup_tsns[asoc->numduptsns] = tsn;
1754 asoc->numduptsns++;
1755 }
1756 asoc->send_sack = 1;
1757 return (0);
1758 }
1759 /* Calculate the number of TSN's between the base and this TSN */
1760 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1761 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1762 /* Can't hold the bit in the mapping at max array, toss it */
1763 return (0);
1764 }
1765 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1766 SCTP_TCB_LOCK_ASSERT(stcb);
1767 if (sctp_expand_mapping_array(asoc, gap)) {
1768 /* Can't expand, drop it */
1769 return (0);
1770 }
1771 }
1772 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1773 *high_tsn = tsn;
1774 }
1775 /* See if we have received this one already */
1776 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1777 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1778 SCTP_STAT_INCR(sctps_recvdupdata);
1779 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1780 /* Record a dup for the next outbound sack */
1781 asoc->dup_tsns[asoc->numduptsns] = tsn;
1782 asoc->numduptsns++;
1783 }
1784 asoc->send_sack = 1;
1785 return (0);
1786 }
1787 /*
1788 * Check to see about the GONE flag, duplicates would cause a sack
1789 * to be sent up above
1790 */
1791 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1792 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
t00fcxen08f9ff92014-03-16 13:38:54 +00001793 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
tuexendd729232011-11-01 23:04:43 +00001794 /*
1795 * wait a minute, this guy is gone, there is no longer a
1796 * receiver. Send peer an ABORT!
1797 */
t00fcxen08f9ff92014-03-16 13:38:54 +00001798 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
tuexenda53ff02012-05-14 09:00:59 +00001799 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00001800 *abort_flag = 1;
1801 return (0);
1802 }
1803 /*
1804 * Now before going further we see if there is room. If NOT then we
1805 * MAY let one through only IF this TSN is the one we are waiting
1806 * for on a partial delivery API.
1807 */
1808
Michael Tuexene5001952016-04-17 19:25:27 +02001809 /* Is the stream valid? */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001810 if (sid >= asoc->streamincnt) {
Michael Tuexenf39c4292015-09-12 19:39:48 +02001811 struct sctp_error_invalid_stream *cause;
tuexendd729232011-11-01 23:04:43 +00001812
Michael Tuexenf39c4292015-09-12 19:39:48 +02001813 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1814 0, M_NOWAIT, 1, MT_DATA);
1815 if (op_err != NULL) {
tuexendd729232011-11-01 23:04:43 +00001816 /* add some space up front so prepend will work well */
Michael Tuexenf39c4292015-09-12 19:39:48 +02001817 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1818 cause = mtod(op_err, struct sctp_error_invalid_stream *);
tuexendd729232011-11-01 23:04:43 +00001819 /*
1820 * Error causes are just param's and this one has
1821 * two back to back phdr, one with the error type
1822 * and size, the other with the streamid and a rsvd
1823 */
Michael Tuexenf39c4292015-09-12 19:39:48 +02001824 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1825 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1826 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001827 cause->stream_id = htons(sid);
Michael Tuexenf39c4292015-09-12 19:39:48 +02001828 cause->reserved = htons(0);
1829 sctp_queue_op_err(stcb, op_err);
tuexendd729232011-11-01 23:04:43 +00001830 }
1831 SCTP_STAT_INCR(sctps_badsid);
1832 SCTP_TCB_LOCK_ASSERT(stcb);
1833 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1834 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1835 asoc->highest_tsn_inside_nr_map = tsn;
1836 }
1837 if (tsn == (asoc->cumulative_tsn + 1)) {
1838 /* Update cum-ack */
1839 asoc->cumulative_tsn = tsn;
1840 }
1841 return (0);
1842 }
1843 /*
Michael Tuexene5001952016-04-17 19:25:27 +02001844 * If its a fragmented message, lets see if we can
1845 * find the control on the reassembly queues.
tuexendd729232011-11-01 23:04:43 +00001846 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001847 if ((chk_type == SCTP_IDATA) &&
1848 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001849 (fsn == 0)) {
Michael Tuexene5001952016-04-17 19:25:27 +02001850 /*
1851 * The first *must* be fsn 0, and other
1852 * (middle/end) pieces can *not* be fsn 0.
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001853 * XXX: This can happen in case of a wrap around.
1854 * Ignore is for now.
Michael Tuexene5001952016-04-17 19:25:27 +02001855 */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001856 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001857 mid, chk_flags);
Michael Tuexene5001952016-04-17 19:25:27 +02001858 goto err_out;
1859 }
Michael Tuexene411f662016-12-17 23:36:21 +01001860 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001861 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001862 chk_flags, control);
1863 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02001864 /* See if we can find the re-assembly entity */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001865 if (control != NULL) {
Michael Tuexene5001952016-04-17 19:25:27 +02001866 /* We found something, does it belong? */
Michael Tuexen00657ac2016-12-07 21:53:26 +01001867 if (ordered && (mid != control->mid)) {
1868 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
Michael Tuexene5001952016-04-17 19:25:27 +02001869 err_out:
1870 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1871 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1872 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1873 *abort_flag = 1;
1874 return (0);
1875 }
1876 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1877 /* We can't have a switched order with an unordered chunk */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001878 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1879 tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001880 goto err_out;
1881 }
1882 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1883 /* We can't have a switched unordered with a ordered chunk */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001884 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1885 tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02001886 goto err_out;
1887 }
1888 }
1889 } else {
1890 /* Its a complete segment. Lets validate we
1891 * don't have a re-assembly going on with
1892 * the same Stream/Seq (for ordered) or in
1893 * the same Stream for unordered.
1894 */
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001895 if (control != NULL) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01001896 if (ordered || asoc->idata_supported) {
1897 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001898 chk_flags, mid);
Michael Tuexen00657ac2016-12-07 21:53:26 +01001899 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
Michael Tuexen4ce5bad2016-08-10 19:24:19 +02001900 goto err_out;
1901 } else {
1902 if ((tsn == control->fsn_included + 1) &&
1903 (control->end_added == 0)) {
1904 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1905 goto err_out;
1906 } else {
1907 control = NULL;
1908 }
1909 }
Michael Tuexene5001952016-04-17 19:25:27 +02001910 }
1911 }
1912 /* now do the tests */
1913 if (((asoc->cnt_on_all_streams +
1914 asoc->cnt_on_reasm_queue +
1915 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1916 (((int)asoc->my_rwnd) <= 0)) {
1917 /*
1918 * When we have NO room in the rwnd we check to make sure
1919 * the reader is doing its job...
1920 */
1921 if (stcb->sctp_socket->so_rcv.sb_cc) {
1922 /* some to read, wake-up */
1923#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1924 struct socket *so;
1925
1926 so = SCTP_INP_SO(stcb->sctp_ep);
1927 atomic_add_int(&stcb->asoc.refcnt, 1);
1928 SCTP_TCB_UNLOCK(stcb);
1929 SCTP_SOCKET_LOCK(so, 1);
1930 SCTP_TCB_LOCK(stcb);
1931 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1932 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1933 /* assoc was freed while we were unlocked */
1934 SCTP_SOCKET_UNLOCK(so, 1);
1935 return (0);
1936 }
1937#endif
1938 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1939#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1940 SCTP_SOCKET_UNLOCK(so, 1);
1941#endif
1942 }
1943 /* now is it in the mapping array of what we have accepted? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001944 if (chk_type == SCTP_DATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02001945 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1946 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1947 /* Nope not in the valid range dump it */
1948 dump_packet:
1949 sctp_set_rwnd(stcb, asoc);
1950 if ((asoc->cnt_on_all_streams +
1951 asoc->cnt_on_reasm_queue +
1952 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1953 SCTP_STAT_INCR(sctps_datadropchklmt);
1954 } else {
1955 SCTP_STAT_INCR(sctps_datadroprwnd);
1956 }
1957 *break_flag = 1;
1958 return (0);
1959 }
1960 } else {
1961 if (control == NULL) {
1962 goto dump_packet;
1963 }
1964 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1965 goto dump_packet;
1966 }
1967 }
1968 }
tuexendd729232011-11-01 23:04:43 +00001969#ifdef SCTP_ASOCLOG_OF_TSNS
1970 SCTP_TCB_LOCK_ASSERT(stcb);
1971 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1972 asoc->tsn_in_at = 0;
1973 asoc->tsn_in_wrapped = 1;
1974 }
1975 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
Michael Tuexen00657ac2016-12-07 21:53:26 +01001976 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1977 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
tuexendd729232011-11-01 23:04:43 +00001978 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1979 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1980 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1981 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1982 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1983 asoc->tsn_in_at++;
1984#endif
Michael Tuexene5001952016-04-17 19:25:27 +02001985 /*
1986 * Before we continue lets validate that we are not being fooled by
1987 * an evil attacker. We can only have Nk chunks based on our TSN
1988 * spread allowed by the mapping array N * 8 bits, so there is no
1989 * way our stream sequence numbers could have wrapped. We of course
1990 * only validate the FIRST fragment so the bit must be set.
1991 */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001992 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
tuexendd729232011-11-01 23:04:43 +00001993 (TAILQ_EMPTY(&asoc->resetHead)) &&
Michael Tuexendbfc1b82016-12-11 14:57:19 +01001994 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01001995 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
tuexendd729232011-11-01 23:04:43 +00001996 /* The incoming sseq is behind where we last delivered? */
Michael Tuexeneccb4be2016-04-18 08:58:59 +02001997 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01001998 mid, asoc->strmin[sid].last_mid_delivered);
tuexendd729232011-11-01 23:04:43 +00001999
Michael Tuexen00657ac2016-12-07 21:53:26 +01002000 if (asoc->idata_supported) {
2001 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2002 asoc->strmin[sid].last_mid_delivered,
2003 tsn,
2004 sid,
2005 mid);
2006 } else {
2007 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2008 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2009 tsn,
2010 sid,
2011 (uint16_t)mid);
2012 }
t00fcxen08f9ff92014-03-16 13:38:54 +00002013 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
t00fcxen0057a6d2015-05-28 16:42:49 +00002014 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
t00fcxen08f9ff92014-03-16 13:38:54 +00002015 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00002016 *abort_flag = 1;
2017 return (0);
2018 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002019 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002020 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2021 } else {
2022 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2023 }
tuexendd729232011-11-01 23:04:43 +00002024 if (last_chunk == 0) {
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002025 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002026 dmbuf = SCTP_M_COPYM(*m,
2027 (offset + sizeof(struct sctp_idata_chunk)),
2028 the_len, M_NOWAIT);
2029 } else {
2030 dmbuf = SCTP_M_COPYM(*m,
2031 (offset + sizeof(struct sctp_data_chunk)),
2032 the_len, M_NOWAIT);
2033 }
tuexendd729232011-11-01 23:04:43 +00002034#ifdef SCTP_MBUF_LOGGING
2035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
t00fcxen8285bce2015-01-10 21:09:55 +00002036 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
tuexendd729232011-11-01 23:04:43 +00002037 }
2038#endif
2039 } else {
2040 /* We can steal the last chunk */
2041 int l_len;
2042 dmbuf = *m;
2043 /* lop off the top part */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002044 if (chk_type == SCTP_IDATA) {
Michael Tuexene5001952016-04-17 19:25:27 +02002045 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2046 } else {
2047 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2048 }
tuexendd729232011-11-01 23:04:43 +00002049 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2050 l_len = SCTP_BUF_LEN(dmbuf);
2051 } else {
2052 /* need to count up the size hopefully
2053 * does not hit this to often :-0
2054 */
2055 struct mbuf *lat;
tuexen63fc0bb2011-12-27 12:24:52 +00002056
tuexendd729232011-11-01 23:04:43 +00002057 l_len = 0;
tuexen63fc0bb2011-12-27 12:24:52 +00002058 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
tuexendd729232011-11-01 23:04:43 +00002059 l_len += SCTP_BUF_LEN(lat);
tuexendd729232011-11-01 23:04:43 +00002060 }
2061 }
2062 if (l_len > the_len) {
2063 /* Trim the end round bytes off too */
2064 m_adj(dmbuf, -(l_len - the_len));
2065 }
2066 }
2067 if (dmbuf == NULL) {
2068 SCTP_STAT_INCR(sctps_nomem);
2069 return (0);
2070 }
Michael Tuexene5001952016-04-17 19:25:27 +02002071 /*
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002072 * Now no matter what, we need a control, get one
Michael Tuexene5001952016-04-17 19:25:27 +02002073 * if we don't have one (we may have gotten it
2074 * above when we found the message was fragmented
2075 */
2076 if (control == NULL) {
2077 sctp_alloc_a_readq(stcb, control);
2078 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002079 ppid,
2080 sid,
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002081 chk_flags,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002082 NULL, fsn, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002083 if (control == NULL) {
2084 SCTP_STAT_INCR(sctps_nomem);
2085 return (0);
2086 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002087 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
Michael Tuexen3381e772017-07-19 17:14:51 +02002088 struct mbuf *mm;
2089
Michael Tuexene5001952016-04-17 19:25:27 +02002090 control->data = dmbuf;
Michael Tuexen3381e772017-07-19 17:14:51 +02002091 for (mm = control->data; mm; mm = mm->m_next) {
2092 control->length += SCTP_BUF_LEN(mm);
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002093 }
Michael Tuexene5001952016-04-17 19:25:27 +02002094 control->tail_mbuf = NULL;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002095 control->end_added = 1;
2096 control->last_frag_seen = 1;
2097 control->first_frag_seen = 1;
2098 control->fsn_included = fsn;
2099 control->top_fsn = fsn;
Michael Tuexene5001952016-04-17 19:25:27 +02002100 }
2101 created_control = 1;
2102 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002103 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002104 chk_flags, ordered, mid, control);
2105 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
tuexendd729232011-11-01 23:04:43 +00002106 TAILQ_EMPTY(&asoc->resetHead) &&
2107 ((ordered == 0) ||
Michael Tuexen00657ac2016-12-07 21:53:26 +01002108 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2109 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
tuexendd729232011-11-01 23:04:43 +00002110 /* Candidate for express delivery */
2111 /*
2112 * Its not fragmented, No PD-API is up, Nothing in the
2113 * delivery queue, Its un-ordered OR ordered and the next to
2114 * deliver AND nothing else is stuck on the stream queue,
2115 * And there is room for it in the socket buffer. Lets just
2116 * stuff it up the buffer....
2117 */
tuexendd729232011-11-01 23:04:43 +00002118 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2119 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2120 asoc->highest_tsn_inside_nr_map = tsn;
2121 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002122 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2123 control, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002124
tuexendd729232011-11-01 23:04:43 +00002125 sctp_add_to_readq(stcb->sctp_ep, stcb,
2126 control, &stcb->sctp_socket->so_rcv,
2127 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2128
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002129 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
tuexendd729232011-11-01 23:04:43 +00002130 /* for ordered, bump what we delivered */
Michael Tuexene411f662016-12-17 23:36:21 +01002131 asoc->strmin[sid].last_mid_delivered++;
tuexendd729232011-11-01 23:04:43 +00002132 }
2133 SCTP_STAT_INCR(sctps_recvexpress);
2134 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002135 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
tuexendd729232011-11-01 23:04:43 +00002136 SCTP_STR_LOG_FROM_EXPRS_DEL);
2137 }
2138 control = NULL;
tuexendd729232011-11-01 23:04:43 +00002139 goto finish_express_del;
2140 }
tuexen63fc0bb2011-12-27 12:24:52 +00002141
Michael Tuexene5001952016-04-17 19:25:27 +02002142 /* Now will we need a chunk too? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002143 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
tuexendd729232011-11-01 23:04:43 +00002144 sctp_alloc_a_chunk(stcb, chk);
2145 if (chk == NULL) {
2146 /* No memory so we drop the chunk */
2147 SCTP_STAT_INCR(sctps_nomem);
2148 if (last_chunk == 0) {
2149 /* we copied it, free the copy */
2150 sctp_m_freem(dmbuf);
2151 }
2152 return (0);
2153 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01002154 chk->rec.data.tsn = tsn;
tuexendd729232011-11-01 23:04:43 +00002155 chk->no_fr_allowed = 0;
Michael Tuexen00657ac2016-12-07 21:53:26 +01002156 chk->rec.data.fsn = fsn;
2157 chk->rec.data.mid = mid;
2158 chk->rec.data.sid = sid;
2159 chk->rec.data.ppid = ppid;
tuexendd729232011-11-01 23:04:43 +00002160 chk->rec.data.context = stcb->asoc.context;
2161 chk->rec.data.doing_fast_retransmit = 0;
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002162 chk->rec.data.rcv_flags = chk_flags;
tuexendd729232011-11-01 23:04:43 +00002163 chk->asoc = asoc;
2164 chk->send_size = the_len;
2165 chk->whoTo = net;
Michael Tuexen00657ac2016-12-07 21:53:26 +01002166 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
Michael Tuexene5001952016-04-17 19:25:27 +02002167 chk,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002168 control, mid);
tuexendd729232011-11-01 23:04:43 +00002169 atomic_add_int(&net->ref_count, 1);
2170 chk->data = dmbuf;
Michael Tuexen3121b802016-04-10 23:28:19 +02002171 }
Michael Tuexene5001952016-04-17 19:25:27 +02002172 /* Set the appropriate TSN mark */
2173 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2174 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2175 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2176 asoc->highest_tsn_inside_nr_map = tsn;
2177 }
2178 } else {
2179 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2180 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2181 asoc->highest_tsn_inside_map = tsn;
2182 }
2183 }
2184 /* Now is it complete (i.e. not fragmented)? */
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002185 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
Michael Tuexene5001952016-04-17 19:25:27 +02002186 /*
2187 * Special check for when streams are resetting. We
2188 * could be more smart about this and check the
2189 * actual stream to see if it is not being reset..
2190 * that way we would not create a HOLB when amongst
2191 * streams being reset and those not being reset.
2192 *
2193 */
2194 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2195 SCTP_TSN_GT(tsn, liste->tsn)) {
Michael Tuexen3121b802016-04-10 23:28:19 +02002196 /*
Michael Tuexene5001952016-04-17 19:25:27 +02002197 * yep its past where we need to reset... go
2198 * ahead and queue it.
Michael Tuexen3121b802016-04-10 23:28:19 +02002199 */
Michael Tuexene5001952016-04-17 19:25:27 +02002200 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2201 /* first one on */
2202 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2203 } else {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002204 struct sctp_queued_to_read *lcontrol, *nlcontrol;
Michael Tuexene5001952016-04-17 19:25:27 +02002205 unsigned char inserted = 0;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002206 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2207 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
Michael Tuexen3121b802016-04-10 23:28:19 +02002208
Michael Tuexene5001952016-04-17 19:25:27 +02002209 continue;
2210 } else {
2211 /* found it */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002212 TAILQ_INSERT_BEFORE(lcontrol, control, next);
Michael Tuexene5001952016-04-17 19:25:27 +02002213 inserted = 1;
2214 break;
2215 }
Michael Tuexen3121b802016-04-10 23:28:19 +02002216 }
Michael Tuexene5001952016-04-17 19:25:27 +02002217 if (inserted == 0) {
2218 /*
2219 * must be put at end, use
2220 * prevP (all setup from
2221 * loop) to setup nextP.
2222 */
2223 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2224 }
2225 }
2226 goto finish_express_del;
2227 }
Michael Tuexendbfc1b82016-12-11 14:57:19 +01002228 if (chk_flags & SCTP_DATA_UNORDERED) {
Michael Tuexene5001952016-04-17 19:25:27 +02002229 /* queue directly into socket buffer */
Michael Tuexen00657ac2016-12-07 21:53:26 +01002230 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2231 control, mid);
Michael Tuexene5001952016-04-17 19:25:27 +02002232 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2233 sctp_add_to_readq(stcb->sctp_ep, stcb,
2234 control,
2235 &stcb->sctp_socket->so_rcv, 1,
2236 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2237
2238 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002239 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2240 mid);
Michael Tuexene411f662016-12-17 23:36:21 +01002241 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
Michael Tuexene5001952016-04-17 19:25:27 +02002242 if (*abort_flag) {
t00fcxen2ed8f3d2014-04-23 21:28:37 +00002243 if (last_chunk) {
2244 *m = NULL;
2245 }
tuexendd729232011-11-01 23:04:43 +00002246 return (0);
tuexendd729232011-11-01 23:04:43 +00002247 }
2248 }
Michael Tuexene5001952016-04-17 19:25:27 +02002249 goto finish_express_del;
2250 }
2251 /* If we reach here its a reassembly */
2252 need_reasm_check = 1;
2253 SCTPDBG(SCTP_DEBUG_XXX,
Michael Tuexen00657ac2016-12-07 21:53:26 +01002254 "Queue data to stream for reasm control: %p MID: %u\n",
2255 control, mid);
Michael Tuexene411f662016-12-17 23:36:21 +01002256 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
Michael Tuexene5001952016-04-17 19:25:27 +02002257 if (*abort_flag) {
2258 /*
2259 * the assoc is now gone and chk was put onto the
2260 * reasm queue, which has all been freed.
2261 */
2262 if (last_chunk) {
2263 *m = NULL;
tuexendd729232011-11-01 23:04:43 +00002264 }
Michael Tuexene5001952016-04-17 19:25:27 +02002265 return (0);
tuexendd729232011-11-01 23:04:43 +00002266 }
2267finish_express_del:
Michael Tuexene5001952016-04-17 19:25:27 +02002268 /* Here we tidy up things */
tuexen15f99d82012-04-19 16:08:38 +00002269 if (tsn == (asoc->cumulative_tsn + 1)) {
2270 /* Update cum-ack */
2271 asoc->cumulative_tsn = tsn;
tuexendd729232011-11-01 23:04:43 +00002272 }
2273 if (last_chunk) {
2274 *m = NULL;
2275 }
2276 if (ordered) {
2277 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2278 } else {
2279 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2280 }
2281 SCTP_STAT_INCR(sctps_recvdata);
2282 /* Set it present please */
2283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01002284 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
tuexendd729232011-11-01 23:04:43 +00002285 }
2286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2287 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2288 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2289 }
Michael Tuexene411f662016-12-17 23:36:21 +01002290 if (need_reasm_check) {
2291 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2292 need_reasm_check = 0;
2293 }
tuexendd729232011-11-01 23:04:43 +00002294 /* check the special flag for stream resets */
2295 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2296 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2297 /*
2298 * we have finished working through the backlogged TSN's now
2299 * time to reset streams. 1: call reset function. 2: free
2300 * pending_reply space 3: distribute any chunks in
2301 * pending_reply_queue.
2302 */
t00fcxen0f0d87f2012-09-07 13:42:20 +00002303 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
tuexendd729232011-11-01 23:04:43 +00002304 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
Michael Tüxen6b4d2922015-07-22 13:55:48 +02002305 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
tuexendd729232011-11-01 23:04:43 +00002306 SCTP_FREE(liste, SCTP_M_STRESET);
2307 /*sa_ignore FREED_MEMORY*/
2308 liste = TAILQ_FIRST(&asoc->resetHead);
2309 if (TAILQ_EMPTY(&asoc->resetHead)) {
2310 /* All can be removed */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002311 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2312 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2313 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
tuexendd729232011-11-01 23:04:43 +00002314 if (*abort_flag) {
tuexen63fc0bb2011-12-27 12:24:52 +00002315 return (0);
tuexendd729232011-11-01 23:04:43 +00002316 }
Michael Tuexene411f662016-12-17 23:36:21 +01002317 if (need_reasm_check) {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002318 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
Michael Tuexene411f662016-12-17 23:36:21 +01002319 need_reasm_check = 0;
2320 }
tuexendd729232011-11-01 23:04:43 +00002321 }
2322 } else {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002323 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2324 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
tuexendd729232011-11-01 23:04:43 +00002325 break;
2326 }
2327 /*
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002328 * if control->sinfo_tsn is <= liste->tsn we can
tuexendd729232011-11-01 23:04:43 +00002329 * process it which is the NOT of
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002330 * control->sinfo_tsn > liste->tsn
tuexendd729232011-11-01 23:04:43 +00002331 */
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002332 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2333 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
tuexendd729232011-11-01 23:04:43 +00002334 if (*abort_flag) {
tuexen63fc0bb2011-12-27 12:24:52 +00002335 return (0);
tuexendd729232011-11-01 23:04:43 +00002336 }
Michael Tuexene411f662016-12-17 23:36:21 +01002337 if (need_reasm_check) {
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02002338 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
Michael Tuexene411f662016-12-17 23:36:21 +01002339 need_reasm_check = 0;
2340 }
tuexendd729232011-11-01 23:04:43 +00002341 }
2342 }
tuexendd729232011-11-01 23:04:43 +00002343 }
2344 return (1);
2345}
2346
/*
 * sctp_map_lookup_tab[v] is the length of the run of consecutive 1-bits
 * in v counted from bit 0 upward, i.e. the index of the lowest clear
 * bit (0 for even v, 8 only for v == 0xff).  The mapping-array scan in
 * sctp_slide_mapping_arrays() uses it to finish counting acked TSNs
 * inside the first byte that is not completely filled.
 */
static const int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8
};
2381
2382
/*
 * Recompute the cumulative TSN from the (OR of the) mapping arrays and,
 * once at least one whole byte of TSNs has been received, either clear
 * the arrays (everything acked) or slide them down so the bitmaps stay
 * anchored just above the new cum-ack.  Pure bookkeeping: nothing is
 * sent from here.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think
	 * that all entries that make up the position
	 * of the cum-ack would be in the nr-mapping array
	 * only.. i.e. things up to the cum-ack are always
	 * deliverable. Thats true with one exception, when
	 * its a fragmented message we may not deliver the data
	 * until some threshold (or all of it) is in place. So
	 * we must OR the nr_mapping_array and mapping_array to
	 * get a true picture of the cum-ack.
	 */
	struct sctp_association *asoc;
	int at;			/* length of the leading run of set bits */
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	/* Snapshot the old values for the optional MAP logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	at = 0;
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		/* OR the arrays: a TSN counts as received if set in either. */
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit; add the set bits below it */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	/* If at == 0 this leaves the cum-ack one below the array base. */
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
		/*
		 * The computed cum-ack passed both recorded highest TSNs,
		 * which should be impossible.
		 */
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		/* Non-INVARIANTS build: log, dump, and repair the markers. */
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
		            asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	/* Work with the larger of the two highest-TSN markers. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;
#ifdef INVARIANTS
		unsigned int i;
#endif

		/* clear the array: clr = number of whole bytes covered by 'at' */
		clr = ((at+7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		/* Debug build: verify both arrays really are all-zero now. */
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		/* Re-anchor the (now empty) maps right above the cum-ack. */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);	/* byte index holding highest_tsn */
		if (slide_end < slide_from) {
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
			            lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
			            asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		/* Number of bytes that must remain after the slide. */
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
				     SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
					     (uint32_t) asoc->mapping_array_size,
					     SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Copy the live bytes down to the front of both arrays. */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];

			}
			/* Zero everything behind the copied region. */
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * Keep the "empty map" invariant (highest == base - 1)
			 * intact when a map held no TSNs before the slide.
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			/* Each slid byte advances the base TSN by 8. */
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
					     SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
2551
/*
 * Decide how to acknowledge received DATA: first slide the mapping
 * arrays, then either send a SACK right away, (re)start the delayed
 * SACK timer, or — in the SHUTDOWN-SENT state — (re)send a SHUTDOWN
 * plus a SACK if a gap exists.  'was_a_gap' tells us whether a gap
 * existed before this packet was processed.
 */
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;
	int is_a_gap;

	sctp_slide_mapping_arrays(stcb);
	asoc = &stcb->asoc;
	/* Use the larger of the two highest-TSN markers. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	/* Is there a gap now? */
	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok special case, in SHUTDOWN-SENT case. here we
		 * maker sure SACK timer is off and instead send a
		 * SHUTDOWN and a SACK
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			/* Stop the delayed-ack timer before sending. */
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			                stcb->sctp_ep, stcb, NULL,
			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
		}
		/* Prefer the alternate destination when one is set. */
		sctp_send_shutdown(stcb,
		                   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		if (is_a_gap) {
			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		}
	} else {
		/*
		 * CMT DAC algorithm: increase number of packets
		 * received since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
		    ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
							 * longer is one */
		    (stcb->asoc.numduptsns) ||          /* we have dup's */
		    (is_a_gap) ||                       /* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
		    ) {

			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of reordering.
				 * Therefore, acks that do not have to be
				 * sent because of the above reasons will
				 * be delayed; that is, acks that would
				 * have been sent due to gap reports will
				 * be delayed with DAC. Start the delayed
				 * ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				                 stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok we must build a SACK since the
				 * timer is pending, we got our
				 * first packet OR there are gaps or
				 * duplicates.
				 */
				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Nothing urgent: arm the delayed-ack timer if idle. */
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				                 stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}
2644
tuexendd729232011-11-01 23:04:43 +00002645int
2646sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
Michael Tüxen9843e062015-08-02 18:10:36 +02002647 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2648 struct sctp_nets *net, uint32_t *high_tsn)
tuexendd729232011-11-01 23:04:43 +00002649{
Michael Tuexene5001952016-04-17 19:25:27 +02002650 struct sctp_chunkhdr *ch, chunk_buf;
tuexendd729232011-11-01 23:04:43 +00002651 struct sctp_association *asoc;
2652 int num_chunks = 0; /* number of control chunks processed */
2653 int stop_proc = 0;
Michael Tuexen48b98022017-10-18 23:12:24 +02002654 int break_flag, last_chunk;
tuexendd729232011-11-01 23:04:43 +00002655 int abort_flag = 0, was_a_gap;
2656 struct mbuf *m;
2657 uint32_t highest_tsn;
Michael Tuexen48b98022017-10-18 23:12:24 +02002658 uint16_t chk_length;
tuexendd729232011-11-01 23:04:43 +00002659
2660 /* set the rwnd */
2661 sctp_set_rwnd(stcb, &stcb->asoc);
2662
2663 m = *mm;
2664 SCTP_TCB_LOCK_ASSERT(stcb);
2665 asoc = &stcb->asoc;
2666 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2667 highest_tsn = asoc->highest_tsn_inside_nr_map;
2668 } else {
2669 highest_tsn = asoc->highest_tsn_inside_map;
2670 }
2671 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2672 /*
2673 * setup where we got the last DATA packet from for any SACK that
2674 * may need to go out. Don't bump the net. This is done ONLY when a
2675 * chunk is assigned.
2676 */
2677 asoc->last_data_chunk_from = net;
2678
2679#ifndef __Panda__
2680 /*-
2681 * Now before we proceed we must figure out if this is a wasted
2682 * cluster... i.e. it is a small packet sent in and yet the driver
2683 * underneath allocated a full cluster for it. If so we must copy it
2684 * to a smaller mbuf and free up the cluster mbuf. This will help
2685 * with cluster starvation. Note for __Panda__ we don't do this
2686 * since it has clusters all the way down to 64 bytes.
2687 */
2688 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2689 /* we only handle mbufs that are singletons.. not chains */
t00fcxen23c2b8f2012-12-10 20:15:50 +00002690 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
tuexendd729232011-11-01 23:04:43 +00002691 if (m) {
2692 /* ok lets see if we can copy the data up */
2693 caddr_t *from, *to;
2694 /* get the pointers and copy */
2695 to = mtod(m, caddr_t *);
2696 from = mtod((*mm), caddr_t *);
2697 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2698 /* copy the length and free up the old */
2699 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2700 sctp_m_freem(*mm);
Michael Tuexen34488e72016-05-03 22:11:59 +02002701 /* success, back copy */
tuexendd729232011-11-01 23:04:43 +00002702 *mm = m;
2703 } else {
2704 /* We are in trouble in the mbuf world .. yikes */
2705 m = *mm;
2706 }
2707 }
2708#endif
2709 /* get pointer to the first chunk header */
Michael Tuexene5001952016-04-17 19:25:27 +02002710 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
Michael Tuexen97aebbe2017-10-18 22:22:05 +02002711 sizeof(struct sctp_chunkhdr),
2712 (uint8_t *)&chunk_buf);
tuexendd729232011-11-01 23:04:43 +00002713 if (ch == NULL) {
2714 return (1);
2715 }
2716 /*
2717 * process all DATA chunks...
2718 */
2719 *high_tsn = asoc->cumulative_tsn;
2720 break_flag = 0;
2721 asoc->data_pkts_seen++;
2722 while (stop_proc == 0) {
2723 /* validate chunk length */
Michael Tuexene5001952016-04-17 19:25:27 +02002724 chk_length = ntohs(ch->chunk_length);
tuexendd729232011-11-01 23:04:43 +00002725 if (length - *offset < chk_length) {
2726 /* all done, mutulated chunk */
2727 stop_proc = 1;
tuexen63fc0bb2011-12-27 12:24:52 +00002728 continue;
tuexendd729232011-11-01 23:04:43 +00002729 }
Michael Tuexene5001952016-04-17 19:25:27 +02002730 if ((asoc->idata_supported == 1) &&
2731 (ch->chunk_type == SCTP_DATA)) {
2732 struct mbuf *op_err;
2733 char msg[SCTP_DIAG_INFO_LEN];
2734
Michael Tuexen3a9472d2016-05-12 16:45:18 +02002735 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
Michael Tuexene5001952016-04-17 19:25:27 +02002736 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2737 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2738 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2739 return (2);
2740 }
2741 if ((asoc->idata_supported == 0) &&
2742 (ch->chunk_type == SCTP_IDATA)) {
2743 struct mbuf *op_err;
2744 char msg[SCTP_DIAG_INFO_LEN];
2745
Michael Tuexen3a9472d2016-05-12 16:45:18 +02002746 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
Michael Tuexene5001952016-04-17 19:25:27 +02002747 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2748 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2749 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2750 return (2);
2751 }
2752 if ((ch->chunk_type == SCTP_DATA) ||
2753 (ch->chunk_type == SCTP_IDATA)) {
Michael Tuexen48b98022017-10-18 23:12:24 +02002754 uint16_t clen;
Michael Tuexen66c84932016-04-18 11:42:41 +02002755
Michael Tuexene5001952016-04-17 19:25:27 +02002756 if (ch->chunk_type == SCTP_DATA) {
2757 clen = sizeof(struct sctp_data_chunk);
2758 } else {
2759 clen = sizeof(struct sctp_idata_chunk);
2760 }
Michael Tuexen66c84932016-04-18 11:42:41 +02002761 if (chk_length < clen) {
tuexendd729232011-11-01 23:04:43 +00002762 /*
2763 * Need to send an abort since we had a
2764 * invalid data chunk.
2765 */
2766 struct mbuf *op_err;
t00fcxen08f9ff92014-03-16 13:38:54 +00002767 char msg[SCTP_DIAG_INFO_LEN];
tuexendd729232011-11-01 23:04:43 +00002768
Michael Tuexen48b98022017-10-18 23:12:24 +02002769 snprintf(msg, sizeof(msg), "%s chunk of length %u",
Michael Tuexen97aebbe2017-10-18 22:22:05 +02002770 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
t00fcxen08f9ff92014-03-16 13:38:54 +00002771 chk_length);
2772 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexene5001952016-04-17 19:25:27 +02002773 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
Michael Tüxen9843e062015-08-02 18:10:36 +02002774 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
t00fcxen227f8db2014-04-19 19:44:25 +00002775 return (2);
2776 }
tuexendd729232011-11-01 23:04:43 +00002777#ifdef SCTP_AUDITING_ENABLED
2778 sctp_audit_log(0xB1, 0);
2779#endif
2780 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2781 last_chunk = 1;
2782 } else {
2783 last_chunk = 0;
2784 }
Michael Tuexene5001952016-04-17 19:25:27 +02002785 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
tuexendd729232011-11-01 23:04:43 +00002786 chk_length, net, high_tsn, &abort_flag, &break_flag,
Michael Tuexene5001952016-04-17 19:25:27 +02002787 last_chunk, ch->chunk_type)) {
tuexendd729232011-11-01 23:04:43 +00002788 num_chunks++;
2789 }
2790 if (abort_flag)
2791 return (2);
2792
2793 if (break_flag) {
2794 /*
2795 * Set because of out of rwnd space and no
2796 * drop rep space left.
2797 */
2798 stop_proc = 1;
tuexen63fc0bb2011-12-27 12:24:52 +00002799 continue;
tuexendd729232011-11-01 23:04:43 +00002800 }
2801 } else {
2802 /* not a data chunk in the data region */
Michael Tuexene5001952016-04-17 19:25:27 +02002803 switch (ch->chunk_type) {
tuexendd729232011-11-01 23:04:43 +00002804 case SCTP_INITIATION:
2805 case SCTP_INITIATION_ACK:
2806 case SCTP_SELECTIVE_ACK:
tuexen63fc0bb2011-12-27 12:24:52 +00002807 case SCTP_NR_SELECTIVE_ACK:
tuexendd729232011-11-01 23:04:43 +00002808 case SCTP_HEARTBEAT_REQUEST:
2809 case SCTP_HEARTBEAT_ACK:
2810 case SCTP_ABORT_ASSOCIATION:
2811 case SCTP_SHUTDOWN:
2812 case SCTP_SHUTDOWN_ACK:
2813 case SCTP_OPERATION_ERROR:
2814 case SCTP_COOKIE_ECHO:
2815 case SCTP_COOKIE_ACK:
2816 case SCTP_ECN_ECHO:
2817 case SCTP_ECN_CWR:
2818 case SCTP_SHUTDOWN_COMPLETE:
2819 case SCTP_AUTHENTICATION:
2820 case SCTP_ASCONF_ACK:
2821 case SCTP_PACKET_DROPPED:
2822 case SCTP_STREAM_RESET:
2823 case SCTP_FORWARD_CUM_TSN:
2824 case SCTP_ASCONF:
Michael Tuexen0ec21502016-05-12 18:39:01 +02002825 {
tuexendd729232011-11-01 23:04:43 +00002826 /*
2827 * Now, what do we do with KNOWN chunks that
2828 * are NOT in the right place?
2829 *
2830 * For now, I do nothing but ignore them. We
2831 * may later want to add sysctl stuff to
2832 * switch out and do either an ABORT() or
2833 * possibly process them.
2834 */
Michael Tuexen0ec21502016-05-12 18:39:01 +02002835 struct mbuf *op_err;
2836 char msg[SCTP_DIAG_INFO_LEN];
tuexendd729232011-11-01 23:04:43 +00002837
Michael Tuexen0ec21502016-05-12 18:39:01 +02002838 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2839 ch->chunk_type);
2840 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2841 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2842 return (2);
2843 }
tuexendd729232011-11-01 23:04:43 +00002844 default:
Michael Tuexen97aebbe2017-10-18 22:22:05 +02002845 /*
2846 * Unknown chunk type: use bit rules after
2847 * checking length
2848 */
2849 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2850 /*
2851 * Need to send an abort since we had a
2852 * invalid chunk.
2853 */
2854 struct mbuf *op_err;
2855 char msg[SCTP_DIAG_INFO_LEN];
2856
Michael Tuexen48b98022017-10-18 23:12:24 +02002857 snprintf(msg, sizeof(msg), "Chunk of length %u",
Michael Tuexen97aebbe2017-10-18 22:22:05 +02002858 chk_length);
2859 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2860 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2861 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2862 return (2);
2863 }
Michael Tuexene5001952016-04-17 19:25:27 +02002864 if (ch->chunk_type & 0x40) {
tuexendd729232011-11-01 23:04:43 +00002865 /* Add a error report to the queue */
Michael Tuexenf39c4292015-09-12 19:39:48 +02002866 struct mbuf *op_err;
2867 struct sctp_gen_error_cause *cause;
tuexendd729232011-11-01 23:04:43 +00002868
Michael Tuexenf39c4292015-09-12 19:39:48 +02002869 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2870 0, M_NOWAIT, 1, MT_DATA);
2871 if (op_err != NULL) {
2872 cause = mtod(op_err, struct sctp_gen_error_cause *);
2873 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
Michael Tuexen1ce9b132016-03-25 15:03:49 +01002874 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
Michael Tuexenf39c4292015-09-12 19:39:48 +02002875 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2876 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2877 if (SCTP_BUF_NEXT(op_err) != NULL) {
2878 sctp_queue_op_err(stcb, op_err);
tuexendd729232011-11-01 23:04:43 +00002879 } else {
Michael Tuexenf39c4292015-09-12 19:39:48 +02002880 sctp_m_freem(op_err);
tuexendd729232011-11-01 23:04:43 +00002881 }
2882 }
2883 }
Michael Tuexene5001952016-04-17 19:25:27 +02002884 if ((ch->chunk_type & 0x80) == 0) {
tuexendd729232011-11-01 23:04:43 +00002885 /* discard the rest of this packet */
2886 stop_proc = 1;
2887 } /* else skip this bad chunk and
2888 * continue... */
2889 break;
tuexen63fc0bb2011-12-27 12:24:52 +00002890 } /* switch of chunk type */
tuexendd729232011-11-01 23:04:43 +00002891 }
2892 *offset += SCTP_SIZE32(chk_length);
2893 if ((*offset >= length) || stop_proc) {
2894 /* no more data left in the mbuf chain */
2895 stop_proc = 1;
2896 continue;
2897 }
Michael Tuexene5001952016-04-17 19:25:27 +02002898 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
Michael Tuexen97aebbe2017-10-18 22:22:05 +02002899 sizeof(struct sctp_chunkhdr),
2900 (uint8_t *)&chunk_buf);
tuexendd729232011-11-01 23:04:43 +00002901 if (ch == NULL) {
2902 *offset = length;
2903 stop_proc = 1;
tuexen63fc0bb2011-12-27 12:24:52 +00002904 continue;
tuexendd729232011-11-01 23:04:43 +00002905 }
tuexen63fc0bb2011-12-27 12:24:52 +00002906 }
tuexendd729232011-11-01 23:04:43 +00002907 if (break_flag) {
2908 /*
2909 * we need to report rwnd overrun drops.
2910 */
tuexen3caef192012-06-24 23:24:06 +00002911 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
tuexendd729232011-11-01 23:04:43 +00002912 }
2913 if (num_chunks) {
2914 /*
2915 * Did we get data, if so update the time for auto-close and
2916 * give peer credit for being alive.
2917 */
2918 SCTP_STAT_INCR(sctps_recvpktwithdata);
2919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2920 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2921 stcb->asoc.overall_error_count,
2922 0,
2923 SCTP_FROM_SCTP_INDATA,
2924 __LINE__);
2925 }
2926 stcb->asoc.overall_error_count = 0;
2927 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2928 }
2929 /* now service all of the reassm queue if needed */
Michael Tuexen348a36c2018-08-13 16:24:47 +02002930 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
tuexendd729232011-11-01 23:04:43 +00002931 /* Assure that we ack right away */
2932 stcb->asoc.send_sack = 1;
2933 }
2934 /* Start a sack timer or QUEUE a SACK for sending */
tuexen9784e9a2011-12-18 13:04:23 +00002935 sctp_sack_check(stcb, was_a_gap);
tuexendd729232011-11-01 23:04:43 +00002936 return (0);
2937}
2938
/*
 * Process one SACK gap-ack block covering TSNs [last_tsn + frag_strt,
 * last_tsn + frag_end] against the association's sent queue.
 *
 * p_tp1       - in/out cursor into the sent queue; resuming from the chunk
 *               the previous block left off at avoids rescanning the queue.
 * last_tsn    - the cumulative TSN the gap offsets are relative to.
 * nr_sacking  - nonzero when this block comes from the NR (non-renegable)
 *               part of the SACK; acked chunks are then freed immediately.
 * num_frs     - out: incremented for every chunk in fast-retransmit state.
 * biggest_newly_acked_tsn / this_sack_lowest_newack - out: CMT bookkeeping.
 * rto_ok      - in/out: cleared after the first RTT sample is taken so only
 *               one measurement per SACK feeds the RTO.
 *
 * Returns nonzero if at least one chunk's data was freed (NR case), which
 * the caller uses to decide whether to wake the sending socket.
 *
 * NOTE(review): the statement ordering here (flight-size accounting before
 * state transitions, the single wrap-around retry via `circled`) is load
 * bearing; the code is intentionally left untouched and only documented.
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t *biggest_newly_acked_tsn,
    uint32_t *this_sack_lowest_newack,
    int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	/* Walk every TSN named by the gap block. */
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.tsn == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.tsn;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.tsn,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.tsn,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uint32_t)(uintptr_t)tp1->whoTo,
							    tp1->rec.data.tsn);
						}
						/* Chunk leaves flight: shrink per-net and total flight size. */
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							    tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?
							 */
							if (tp1->do_rtt) {
								/* Only the first eligible chunk per SACK feeds the RTO. */
								if (*rto_ok) {
									tp1->whoTo->RTO =
									    sctp_calculate_rto(stcb,
									    &stcb->asoc,
									    tp1->whoTo,
									    &tp1->sent_rcv_time,
									    SCTP_RTT_FROM_DATA);
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}

					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
							    tp1->rec.data.tsn;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						/* Non-renegable ack: release the data right now. */
						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
						}
						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
							stcb->asoc.trigger_reset = 1;
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/* sa_ignore NO_NULL_CHK */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			}	/* if (tp1->tsn == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
				/* Passed the target TSN; it is not on the queue. */
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			/* Wrap to the head once, in case blocks arrived out of order. */
			if ((tp1 == NULL) && (circled == 0)) {
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		}	/* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	}	/* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}
3175
3176
3177static int
3178sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3179 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3180 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
tuexen9784e9a2011-12-18 13:04:23 +00003181 int num_seg, int num_nr_seg, int *rto_ok)
tuexendd729232011-11-01 23:04:43 +00003182{
3183 struct sctp_gap_ack_block *frag, block;
3184 struct sctp_tmit_chunk *tp1;
3185 int i;
3186 int num_frs = 0;
3187 int chunk_freed;
3188 int non_revocable;
3189 uint16_t frag_strt, frag_end, prev_frag_end;
3190
3191 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3192 prev_frag_end = 0;
3193 chunk_freed = 0;
3194
3195 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3196 if (i == num_seg) {
3197 prev_frag_end = 0;
3198 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3199 }
3200 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3201 sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3202 *offset += sizeof(block);
3203 if (frag == NULL) {
3204 return (chunk_freed);
3205 }
3206 frag_strt = ntohs(frag->start);
3207 frag_end = ntohs(frag->end);
3208
3209 if (frag_strt > frag_end) {
3210 /* This gap report is malformed, skip it. */
3211 continue;
3212 }
3213 if (frag_strt <= prev_frag_end) {
3214 /* This gap report is not in order, so restart. */
3215 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3216 }
3217 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3218 *biggest_tsn_acked = last_tsn + frag_end;
3219 }
3220 if (i < num_seg) {
3221 non_revocable = 0;
3222 } else {
3223 non_revocable = 1;
3224 }
3225 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3226 non_revocable, &num_frs, biggest_newly_acked_tsn,
tuexen9784e9a2011-12-18 13:04:23 +00003227 this_sack_lowest_newack, rto_ok)) {
tuexendd729232011-11-01 23:04:43 +00003228 chunk_freed = 1;
3229 }
3230 prev_frag_end = frag_end;
3231 }
3232 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3233 if (num_frs)
3234 sctp_log_fr(*biggest_tsn_acked,
3235 *biggest_newly_acked_tsn,
3236 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3237 }
3238 return (chunk_freed);
3239}
3240
3241static void
3242sctp_check_for_revoked(struct sctp_tcb *stcb,
3243 struct sctp_association *asoc, uint32_t cumack,
3244 uint32_t biggest_tsn_acked)
3245{
3246 struct sctp_tmit_chunk *tp1;
tuexendd729232011-11-01 23:04:43 +00003247
3248 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003249 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
tuexendd729232011-11-01 23:04:43 +00003250 /*
3251 * ok this guy is either ACK or MARKED. If it is
3252 * ACKED it has been previously acked but not this
3253 * time i.e. revoked. If it is MARKED it was ACK'ed
3254 * again.
3255 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01003256 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
tuexendd729232011-11-01 23:04:43 +00003257 break;
3258 }
3259 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3260 /* it has been revoked */
3261 tp1->sent = SCTP_DATAGRAM_SENT;
3262 tp1->rec.data.chunk_was_revoked = 1;
3263 /* We must add this stuff back in to
3264 * assure timers and such get started.
3265 */
3266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3267 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3268 tp1->whoTo->flight_size,
3269 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003270 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003271 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003272 }
3273 sctp_flight_size_increase(tp1);
3274 sctp_total_flight_increase(stcb, tp1);
3275 /* We inflate the cwnd to compensate for our
3276 * artificial inflation of the flight_size.
3277 */
3278 tp1->whoTo->cwnd += tp1->book_size;
tuexendd729232011-11-01 23:04:43 +00003279 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3280 sctp_log_sack(asoc->last_acked_seq,
3281 cumack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003282 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003283 0,
3284 0,
3285 SCTP_LOG_TSN_REVOKED);
3286 }
3287 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3288 /* it has been re-acked in this SACK */
3289 tp1->sent = SCTP_DATAGRAM_ACKED;
3290 }
3291 }
3292 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3293 break;
3294 }
3295}
3296
3297
3298static void
3299sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3300 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3301{
3302 struct sctp_tmit_chunk *tp1;
3303 int strike_flag = 0;
3304 struct timeval now;
3305 int tot_retrans = 0;
3306 uint32_t sending_seq;
3307 struct sctp_nets *net;
3308 int num_dests_sacked = 0;
3309
3310 /*
3311 * select the sending_seq, this is either the next thing ready to be
3312 * sent but not transmitted, OR, the next seq we assign.
3313 */
3314 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3315 if (tp1 == NULL) {
3316 sending_seq = asoc->sending_seq;
3317 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003318 sending_seq = tp1->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00003319 }
3320
3321 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3322 if ((asoc->sctp_cmt_on_off > 0) &&
3323 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3324 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3325 if (net->saw_newack)
3326 num_dests_sacked++;
3327 }
3328 }
t00fcxen0e78cef2014-08-02 22:05:33 +00003329 if (stcb->asoc.prsctp_supported) {
tuexendd729232011-11-01 23:04:43 +00003330 (void)SCTP_GETTIME_TIMEVAL(&now);
3331 }
3332 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3333 strike_flag = 0;
3334 if (tp1->no_fr_allowed) {
3335 /* this one had a timeout or something */
3336 continue;
3337 }
3338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3339 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3340 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003341 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003342 tp1->sent,
3343 SCTP_FR_LOG_CHECK_STRIKE);
3344 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01003345 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
tuexendd729232011-11-01 23:04:43 +00003346 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3347 /* done */
3348 break;
3349 }
t00fcxen0e78cef2014-08-02 22:05:33 +00003350 if (stcb->asoc.prsctp_supported) {
tuexendd729232011-11-01 23:04:43 +00003351 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3352 /* Is it expired? */
3353#ifndef __FreeBSD__
3354 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3355#else
3356 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3357#endif
3358 /* Yes so drop it */
3359 if (tp1->data != NULL) {
tuexenda53ff02012-05-14 09:00:59 +00003360 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
tuexendd729232011-11-01 23:04:43 +00003361 SCTP_SO_NOT_LOCKED);
3362 }
3363 continue;
3364 }
3365 }
3366
3367 }
Michael Tuexen83714a82018-01-16 23:02:09 +01003368 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3369 !(accum_moved && asoc->fast_retran_loss_recovery)) {
tuexendd729232011-11-01 23:04:43 +00003370 /* we are beyond the tsn in the sack */
3371 break;
3372 }
3373 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3374 /* either a RESEND, ACKED, or MARKED */
3375 /* skip */
3376 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3377 /* Continue strikin FWD-TSN chunks */
3378 tp1->rec.data.fwd_tsn_cnt++;
3379 }
3380 continue;
3381 }
3382 /*
3383 * CMT : SFR algo (covers part of DAC and HTNA as well)
3384 */
3385 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3386 /*
3387 * No new acks were receieved for data sent to this
3388 * dest. Therefore, according to the SFR algo for
3389 * CMT, no data sent to this dest can be marked for
3390 * FR using this SACK.
3391 */
3392 continue;
Michael Tuexen83714a82018-01-16 23:02:09 +01003393 } else if (tp1->whoTo &&
3394 SCTP_TSN_GT(tp1->rec.data.tsn,
3395 tp1->whoTo->this_sack_highest_newack) &&
3396 !(accum_moved && asoc->fast_retran_loss_recovery)) {
tuexendd729232011-11-01 23:04:43 +00003397 /*
3398 * CMT: New acks were receieved for data sent to
3399 * this dest. But no new acks were seen for data
3400 * sent after tp1. Therefore, according to the SFR
3401 * algo for CMT, tp1 cannot be marked for FR using
3402 * this SACK. This step covers part of the DAC algo
3403 * and the HTNA algo as well.
3404 */
3405 continue;
3406 }
3407 /*
3408 * Here we check to see if we were have already done a FR
3409 * and if so we see if the biggest TSN we saw in the sack is
3410 * smaller than the recovery point. If so we don't strike
3411 * the tsn... otherwise we CAN strike the TSN.
3412 */
3413 /*
3414 * @@@ JRI: Check for CMT
3415 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3416 */
3417 if (accum_moved && asoc->fast_retran_loss_recovery) {
3418 /*
3419 * Strike the TSN if in fast-recovery and cum-ack
3420 * moved.
3421 */
3422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3423 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003424 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003425 tp1->sent,
3426 SCTP_FR_LOG_STRIKE_CHUNK);
3427 }
3428 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3429 tp1->sent++;
3430 }
3431 if ((asoc->sctp_cmt_on_off > 0) &&
3432 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3433 /*
3434 * CMT DAC algorithm: If SACK flag is set to
3435 * 0, then lowest_newack test will not pass
3436 * because it would have been set to the
3437 * cumack earlier. If not already to be
3438 * rtx'd, If not a mixed sack and if tp1 is
3439 * not between two sacked TSNs, then mark by
3440 * one more.
3441 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3442 * two packets have been received after this missing TSN.
3443 */
3444 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01003445 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 sctp_log_fr(16 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003448 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003449 tp1->sent,
3450 SCTP_FR_LOG_STRIKE_CHUNK);
3451 }
3452 tp1->sent++;
3453 }
3454 }
3455 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3456 (asoc->sctp_cmt_on_off == 0)) {
3457 /*
3458 * For those that have done a FR we must take
3459 * special consideration if we strike. I.e the
3460 * biggest_newly_acked must be higher than the
3461 * sending_seq at the time we did the FR.
3462 */
3463 if (
3464#ifdef SCTP_FR_TO_ALTERNATE
3465 /*
3466 * If FR's go to new networks, then we must only do
3467 * this for singly homed asoc's. However if the FR's
3468 * go to the same network (Armando's work) then its
3469 * ok to FR multiple times.
3470 */
3471 (asoc->numnets < 2)
3472#else
3473 (1)
3474#endif
3475 ) {
3476
3477 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3478 tp1->rec.data.fast_retran_tsn)) {
3479 /*
3480 * Strike the TSN, since this ack is
3481 * beyond where things were when we
3482 * did a FR.
3483 */
3484 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3485 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003486 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003487 tp1->sent,
3488 SCTP_FR_LOG_STRIKE_CHUNK);
3489 }
3490 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3491 tp1->sent++;
3492 }
3493 strike_flag = 1;
3494 if ((asoc->sctp_cmt_on_off > 0) &&
3495 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3496 /*
3497 * CMT DAC algorithm: If
3498 * SACK flag is set to 0,
3499 * then lowest_newack test
3500 * will not pass because it
3501 * would have been set to
3502 * the cumack earlier. If
3503 * not already to be rtx'd,
3504 * If not a mixed sack and
3505 * if tp1 is not between two
3506 * sacked TSNs, then mark by
3507 * one more.
3508 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3509 * two packets have been received after this missing TSN.
3510 */
3511 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3512 (num_dests_sacked == 1) &&
3513 SCTP_TSN_GT(this_sack_lowest_newack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003514 tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3516 sctp_log_fr(32 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003517 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003518 tp1->sent,
3519 SCTP_FR_LOG_STRIKE_CHUNK);
3520 }
3521 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3522 tp1->sent++;
3523 }
3524 }
3525 }
3526 }
3527 }
3528 /*
3529 * JRI: TODO: remove code for HTNA algo. CMT's
3530 * SFR algo covers HTNA.
3531 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01003532 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003533 biggest_tsn_newly_acked)) {
3534 /*
3535 * We don't strike these: This is the HTNA
3536 * algorithm i.e. we don't strike If our TSN is
3537 * larger than the Highest TSN Newly Acked.
3538 */
3539 ;
3540 } else {
3541 /* Strike the TSN */
3542 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3543 sctp_log_fr(biggest_tsn_newly_acked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003544 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003545 tp1->sent,
3546 SCTP_FR_LOG_STRIKE_CHUNK);
3547 }
3548 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3549 tp1->sent++;
3550 }
3551 if ((asoc->sctp_cmt_on_off > 0) &&
3552 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3553 /*
3554 * CMT DAC algorithm: If SACK flag is set to
3555 * 0, then lowest_newack test will not pass
3556 * because it would have been set to the
3557 * cumack earlier. If not already to be
3558 * rtx'd, If not a mixed sack and if tp1 is
3559 * not between two sacked TSNs, then mark by
3560 * one more.
3561 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3562 * two packets have been received after this missing TSN.
3563 */
3564 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
Michael Tuexen00657ac2016-12-07 21:53:26 +01003565 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00003566 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3567 sctp_log_fr(48 + num_dests_sacked,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003568 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00003569 tp1->sent,
3570 SCTP_FR_LOG_STRIKE_CHUNK);
3571 }
3572 tp1->sent++;
3573 }
3574 }
3575 }
3576 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3577 struct sctp_nets *alt;
3578
3579 /* fix counts and things */
3580 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3581 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3582 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3583 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003584 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003585 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003586 }
3587 if (tp1->whoTo) {
3588 tp1->whoTo->net_ack++;
3589 sctp_flight_size_decrease(tp1);
3590 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3591 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3592 tp1);
3593 }
3594 }
tuexen15f99d82012-04-19 16:08:38 +00003595
tuexendd729232011-11-01 23:04:43 +00003596 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3597 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3598 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3599 }
3600 /* add back to the rwnd */
3601 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
tuexen15f99d82012-04-19 16:08:38 +00003602
tuexendd729232011-11-01 23:04:43 +00003603 /* remove from the total flight */
3604 sctp_total_flight_decrease(stcb, tp1);
3605
t00fcxen0e78cef2014-08-02 22:05:33 +00003606 if ((stcb->asoc.prsctp_supported) &&
tuexendd729232011-11-01 23:04:43 +00003607 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3608 /* Has it been retransmitted tv_sec times? - we store the retran count there. */
3609 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3610 /* Yes, so drop it */
3611 if (tp1->data != NULL) {
tuexenda53ff02012-05-14 09:00:59 +00003612 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
tuexendd729232011-11-01 23:04:43 +00003613 SCTP_SO_NOT_LOCKED);
3614 }
3615 /* Make sure to flag we had a FR */
Michael Tuexen4d933602018-05-06 16:23:44 +02003616 if (tp1->whoTo != NULL) {
3617 tp1->whoTo->net_ack++;
3618 }
tuexendd729232011-11-01 23:04:43 +00003619 continue;
3620 }
tuexen15f99d82012-04-19 16:08:38 +00003621 }
tuexencb5fe8d2012-05-04 09:50:27 +00003622 /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
tuexendd729232011-11-01 23:04:43 +00003623 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01003624 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
tuexendd729232011-11-01 23:04:43 +00003625 0, SCTP_FR_MARKED);
3626 }
3627 if (strike_flag) {
3628 /* This is a subsequent FR */
3629 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3630 }
3631 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3632 if (asoc->sctp_cmt_on_off > 0) {
3633 /*
3634 * CMT: Using RTX_SSTHRESH policy for CMT.
3635 * If CMT is being used, then pick dest with
3636 * largest ssthresh for any retransmission.
3637 */
3638 tp1->no_fr_allowed = 1;
3639 alt = tp1->whoTo;
3640 /*sa_ignore NO_NULL_CHK*/
3641 if (asoc->sctp_cmt_pf > 0) {
3642 /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3643 alt = sctp_find_alternate_net(stcb, alt, 2);
3644 } else {
3645 /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3646 /*sa_ignore NO_NULL_CHK*/
3647 alt = sctp_find_alternate_net(stcb, alt, 1);
3648 }
3649 if (alt == NULL) {
3650 alt = tp1->whoTo;
3651 }
3652 /*
3653 * CUCv2: If a different dest is picked for
3654 * the retransmission, then new
3655 * (rtx-)pseudo_cumack needs to be tracked
3656 * for orig dest. Let CUCv2 track new (rtx-)
3657 * pseudo-cumack always.
3658 */
3659 if (tp1->whoTo) {
3660 tp1->whoTo->find_pseudo_cumack = 1;
3661 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3662 }
3663
3664 } else {/* CMT is OFF */
3665
3666#ifdef SCTP_FR_TO_ALTERNATE
3667 /* Can we find an alternate? */
3668 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3669#else
3670 /*
3671 * default behavior is to NOT retransmit
3672 * FR's to an alternate. Armando Caro's
3673 * paper details why.
3674 */
3675 alt = tp1->whoTo;
3676#endif
3677 }
3678
3679 tp1->rec.data.doing_fast_retransmit = 1;
3680 tot_retrans++;
3681 /* mark the sending seq for possible subsequent FR's */
3682 /*
tuexencb5fe8d2012-05-04 09:50:27 +00003683 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01003684 * (uint32_t)tpi->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003685 */
3686 if (TAILQ_EMPTY(&asoc->send_queue)) {
3687 /*
3688 * If the queue of send is empty then its
3689 * the next sequence number that will be
3690 * assigned so we subtract one from this to
3691 * get the one we last sent.
3692 */
3693 tp1->rec.data.fast_retran_tsn = sending_seq;
3694 } else {
3695 /*
3696 * If there are chunks on the send queue
3697 * (unsent data that has made it from the
3698 * stream queues but not out the door, we
3699 * take the first one (which will have the
3700 * lowest TSN) and subtract one to get the
3701 * one we last sent.
3702 */
3703 struct sctp_tmit_chunk *ttt;
3704
3705 ttt = TAILQ_FIRST(&asoc->send_queue);
3706 tp1->rec.data.fast_retran_tsn =
Michael Tuexen00657ac2016-12-07 21:53:26 +01003707 ttt->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00003708 }
3709
3710 if (tp1->do_rtt) {
3711 /*
3712 * this guy had a RTO calculation pending on
3713 * it, cancel it
3714 */
tuexen63fc0bb2011-12-27 12:24:52 +00003715 if ((tp1->whoTo != NULL) &&
3716 (tp1->whoTo->rto_needed == 0)) {
tuexendd729232011-11-01 23:04:43 +00003717 tp1->whoTo->rto_needed = 1;
3718 }
3719 tp1->do_rtt = 0;
3720 }
3721 if (alt != tp1->whoTo) {
3722 /* yes, there is an alternate. */
3723 sctp_free_remote_addr(tp1->whoTo);
3724 /*sa_ignore FREED_MEMORY*/
3725 tp1->whoTo = alt;
3726 atomic_add_int(&alt->ref_count, 1);
3727 }
3728 }
3729 }
3730}
3731
/*
 * Try to move the PR-SCTP "advanced peer ack point"
 * (asoc->advanced_peer_ack_point) forward by scanning the head of the
 * sent queue.  Chunks marked SCTP_FORWARD_TSN_SKIP or
 * SCTP_DATAGRAM_NR_ACKED can be skipped over; a chunk still marked
 * SCTP_DATAGRAM_RESEND can only be skipped (and is released here) if it
 * uses the PR-SCTP TTL policy and its lifetime has expired.  The scan
 * stops at the first chunk that cannot be skipped.
 *
 * Returns the last chunk the ack point was advanced to (the candidate
 * on which a FORWARD-TSN would be based), or NULL if PR-SCTP is not
 * supported on this association or no advancement was possible.
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;	/* fill 'now' lazily, only if a TTL check is needed */

	if (asoc->prsctp_supported == 0) {
		return (NULL);
	}
	/* _SAFE variant: sctp_release_pr_sctp_chunk() below may free tp1. */
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				               asoc->advanced_peer_ack_point,
				               tp1->rec.data.tsn, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
#ifndef __FreeBSD__
			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
				/* Yes so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when hit one for resend
				 * whos time as not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* advance PeerAckPoint goes forward */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
				a_adv = tp1;
			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
	}
	return (a_adv);
}
3824
3825static int
3826sctp_fs_audit(struct sctp_association *asoc)
3827{
3828 struct sctp_tmit_chunk *chk;
tuexen63fc0bb2011-12-27 12:24:52 +00003829 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
t00fcxen006c3bc2015-05-28 14:33:28 +00003830 int ret;
3831#ifndef INVARIANTS
3832 int entry_flight, entry_cnt;
3833#endif
t00fcxen2ea88ad2014-02-20 20:24:25 +00003834
t00fcxen006c3bc2015-05-28 14:33:28 +00003835 ret = 0;
3836#ifndef INVARIANTS
tuexendd729232011-11-01 23:04:43 +00003837 entry_flight = asoc->total_flight;
3838 entry_cnt = asoc->total_flight_count;
t00fcxen006c3bc2015-05-28 14:33:28 +00003839#endif
tuexendd729232011-11-01 23:04:43 +00003840 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3841 return (0);
3842
3843 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3844 if (chk->sent < SCTP_DATAGRAM_RESEND) {
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003845 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01003846 chk->rec.data.tsn,
tuexencb5fe8d2012-05-04 09:50:27 +00003847 chk->send_size,
3848 chk->snd_count);
tuexendd729232011-11-01 23:04:43 +00003849 inflight++;
3850 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3851 resend++;
3852 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3853 inbetween++;
3854 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3855 above++;
3856 } else {
3857 acked++;
3858 }
3859 }
3860
3861 if ((inflight > 0) || (inbetween > 0)) {
3862#ifdef INVARIANTS
3863 panic("Flight size-express incorrect? \n");
3864#else
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003865 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
tuexencb5fe8d2012-05-04 09:50:27 +00003866 entry_flight, entry_cnt);
tuexendd729232011-11-01 23:04:43 +00003867
Michael Tuexeneccb4be2016-04-18 08:58:59 +02003868 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
tuexendd729232011-11-01 23:04:43 +00003869 inflight, inbetween, resend, above, acked);
3870 ret = 1;
3871#endif
3872 }
3873 return (ret);
3874}
3875
3876
3877static void
3878sctp_window_probe_recovery(struct sctp_tcb *stcb,
3879 struct sctp_association *asoc,
tuexendd729232011-11-01 23:04:43 +00003880 struct sctp_tmit_chunk *tp1)
3881{
3882 tp1->window_probe = 0;
3883 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3884 /* TSN's skipped we do NOT move back. */
3885 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
t00fcxenf95cdf42015-03-24 15:12:04 +00003886 tp1->whoTo ? tp1->whoTo->flight_size : 0,
tuexendd729232011-11-01 23:04:43 +00003887 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003888 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003889 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003890 return;
3891 }
3892 /* First setup this by shrinking flight */
3893 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3894 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3895 tp1);
3896 }
3897 sctp_flight_size_decrease(tp1);
3898 sctp_total_flight_decrease(stcb, tp1);
3899 /* Now mark for resend */
3900 tp1->sent = SCTP_DATAGRAM_RESEND;
3901 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
tuexen15f99d82012-04-19 16:08:38 +00003902
tuexendd729232011-11-01 23:04:43 +00003903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3904 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3905 tp1->whoTo->flight_size,
3906 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01003907 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01003908 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00003909 }
3910}
3911
3912void
3913sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3914 uint32_t rwnd, int *abort_now, int ecne_seen)
3915{
3916 struct sctp_nets *net;
3917 struct sctp_association *asoc;
3918 struct sctp_tmit_chunk *tp1, *tp2;
3919 uint32_t old_rwnd;
3920 int win_probe_recovery = 0;
3921 int win_probe_recovered = 0;
3922 int j, done_once = 0;
tuexen63fc0bb2011-12-27 12:24:52 +00003923 int rto_ok = 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02003924 uint32_t send_s;
tuexendd729232011-11-01 23:04:43 +00003925
3926 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3927 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3928 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3929 }
3930 SCTP_TCB_LOCK_ASSERT(stcb);
3931#ifdef SCTP_ASOCLOG_OF_TSNS
3932 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3933 stcb->asoc.cumack_log_at++;
3934 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3935 stcb->asoc.cumack_log_at = 0;
3936 }
3937#endif
3938 asoc = &stcb->asoc;
3939 old_rwnd = asoc->peers_rwnd;
3940 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3941 /* old ack */
3942 return;
3943 } else if (asoc->last_acked_seq == cumack) {
3944 /* Window update sack */
3945 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3946 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3947 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3948 /* SWS sender side engages */
3949 asoc->peers_rwnd = 0;
3950 }
3951 if (asoc->peers_rwnd > old_rwnd) {
3952 goto again;
3953 }
3954 return;
3955 }
3956
3957 /* First setup for CC stuff */
3958 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3959 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3960 /* Drag along the window_tsn for cwr's */
3961 net->cwr_window_tsn = cumack;
3962 }
3963 net->prev_cwnd = net->cwnd;
3964 net->net_ack = 0;
3965 net->net_ack2 = 0;
3966
3967 /*
3968 * CMT: Reset CUC and Fast recovery algo variables before
3969 * SACK processing
3970 */
3971 net->new_pseudo_cumack = 0;
3972 net->will_exit_fast_recovery = 0;
3973 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3974 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3975 }
3976 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02003977 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3978 tp1 = TAILQ_LAST(&asoc->sent_queue,
3979 sctpchunk_listhead);
Michael Tuexen00657ac2016-12-07 21:53:26 +01003980 send_s = tp1->rec.data.tsn + 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02003981 } else {
3982 send_s = asoc->sending_seq;
3983 }
3984 if (SCTP_TSN_GE(cumack, send_s)) {
3985 struct mbuf *op_err;
3986 char msg[SCTP_DIAG_INFO_LEN];
tuexendd729232011-11-01 23:04:43 +00003987
Michael Tuexen0ec21502016-05-12 18:39:01 +02003988 *abort_now = 1;
3989 /* XXX */
3990 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3991 cumack, send_s);
3992 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3993 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3994 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3995 return;
tuexendd729232011-11-01 23:04:43 +00003996 }
3997 asoc->this_sack_highest_gap = cumack;
3998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3999 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4000 stcb->asoc.overall_error_count,
4001 0,
4002 SCTP_FROM_SCTP_INDATA,
4003 __LINE__);
4004 }
4005 stcb->asoc.overall_error_count = 0;
4006 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4007 /* process the new consecutive TSN first */
4008 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004009 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00004010 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
tuexencb5fe8d2012-05-04 09:50:27 +00004011 SCTP_PRINTF("Warning, an unsent is now acked?\n");
tuexendd729232011-11-01 23:04:43 +00004012 }
4013 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4014 /*
4015 * If it is less than ACKED, it is
4016 * now no-longer in flight. Higher
4017 * values may occur during marking
4018 */
4019 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4021 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4022 tp1->whoTo->flight_size,
4023 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004024 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004025 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004026 }
4027 sctp_flight_size_decrease(tp1);
4028 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4029 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4030 tp1);
4031 }
4032 /* sa_ignore NO_NULL_CHK */
4033 sctp_total_flight_decrease(stcb, tp1);
4034 }
4035 tp1->whoTo->net_ack += tp1->send_size;
4036 if (tp1->snd_count < 2) {
4037 /*
Michael Tuexenc51af972018-08-12 15:32:55 +02004038 * True non-retransmitted
tuexendd729232011-11-01 23:04:43 +00004039 * chunk
4040 */
4041 tp1->whoTo->net_ack2 +=
4042 tp1->send_size;
4043
4044 /* update RTO too? */
4045 if (tp1->do_rtt) {
4046 if (rto_ok) {
4047 tp1->whoTo->RTO =
4048 /*
4049 * sa_ignore
4050 * NO_NULL_CHK
4051 */
4052 sctp_calculate_rto(stcb,
4053 asoc, tp1->whoTo,
4054 &tp1->sent_rcv_time,
tuexendd729232011-11-01 23:04:43 +00004055 SCTP_RTT_FROM_DATA);
4056 rto_ok = 0;
4057 }
4058 if (tp1->whoTo->rto_needed == 0) {
4059 tp1->whoTo->rto_needed = 1;
4060 }
4061 tp1->do_rtt = 0;
4062 }
4063 }
4064 /*
4065 * CMT: CUCv2 algorithm. From the
4066 * cumack'd TSNs, for each TSN being
4067 * acked for the first time, set the
4068 * following variables for the
4069 * corresp destination.
4070 * new_pseudo_cumack will trigger a
4071 * cwnd update.
4072 * find_(rtx_)pseudo_cumack will
4073 * trigger search for the next
4074 * expected (rtx-)pseudo-cumack.
4075 */
4076 tp1->whoTo->new_pseudo_cumack = 1;
4077 tp1->whoTo->find_pseudo_cumack = 1;
4078 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4079
4080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4081 /* sa_ignore NO_NULL_CHK */
Michael Tuexen00657ac2016-12-07 21:53:26 +01004082 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004083 }
4084 }
4085 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4086 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4087 }
4088 if (tp1->rec.data.chunk_was_revoked) {
4089 /* deflate the cwnd */
4090 tp1->whoTo->cwnd -= tp1->book_size;
4091 tp1->rec.data.chunk_was_revoked = 0;
4092 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004093 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004094 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4095 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
t00fcxen9ad90772012-11-07 22:19:57 +00004096#ifdef INVARIANTS
4097 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004098 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
t00fcxen9ad90772012-11-07 22:19:57 +00004099#endif
4100 }
4101 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01004102 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4103 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4104 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
Michael Tuexenc0a12d12015-12-03 16:30:24 +01004105 asoc->trigger_reset = 1;
4106 }
tuexendd729232011-11-01 23:04:43 +00004107 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4108 if (tp1->data) {
4109 /* sa_ignore NO_NULL_CHK */
4110 sctp_free_bufspace(stcb, asoc, tp1, 1);
4111 sctp_m_freem(tp1->data);
4112 tp1->data = NULL;
4113 }
4114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4115 sctp_log_sack(asoc->last_acked_seq,
4116 cumack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004117 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004118 0,
4119 0,
4120 SCTP_LOG_FREE_SENT);
4121 }
4122 asoc->sent_queue_cnt--;
4123 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4124 } else {
4125 break;
4126 }
4127 }
4128
4129 }
tuexen6bffa9a2012-06-25 17:40:03 +00004130#if defined(__Userspace__)
tuexen98456cf2012-04-19 15:37:07 +00004131 if (stcb->sctp_ep->recv_callback) {
4132 if (stcb->sctp_socket) {
4133 uint32_t inqueue_bytes, sb_free_now;
4134 struct sctp_inpcb *inp;
tuexen749d8562011-11-13 13:41:49 +00004135
tuexen98456cf2012-04-19 15:37:07 +00004136 inp = stcb->sctp_ep;
tuexen749d8562011-11-13 13:41:49 +00004137 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
tuexen98456cf2012-04-19 15:37:07 +00004138 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4139
4140 /* check if the amount free in the send socket buffer crossed the threshold */
4141 if (inp->send_callback &&
4142 (((inp->send_sb_threshold > 0) &&
4143 (sb_free_now >= inp->send_sb_threshold) &&
4144 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4145 (inp->send_sb_threshold == 0))) {
4146 atomic_add_int(&stcb->asoc.refcnt, 1);
4147 SCTP_TCB_UNLOCK(stcb);
4148 inp->send_callback(stcb->sctp_socket, sb_free_now);
4149 SCTP_TCB_LOCK(stcb);
4150 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4151 }
tuexen749d8562011-11-13 13:41:49 +00004152 }
tuexen98456cf2012-04-19 15:37:07 +00004153 } else if (stcb->sctp_socket) {
tuexen749d8562011-11-13 13:41:49 +00004154#else
tuexendd729232011-11-01 23:04:43 +00004155 /* sa_ignore NO_NULL_CHK */
4156 if (stcb->sctp_socket) {
tuexen98456cf2012-04-19 15:37:07 +00004157#endif
tuexen6bffa9a2012-06-25 17:40:03 +00004158#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004159 struct socket *so;
4160
4161#endif
4162 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4163 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4164 /* sa_ignore NO_NULL_CHK */
tuexen9784e9a2011-12-18 13:04:23 +00004165 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004166 }
tuexen6bffa9a2012-06-25 17:40:03 +00004167#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004168 so = SCTP_INP_SO(stcb->sctp_ep);
4169 atomic_add_int(&stcb->asoc.refcnt, 1);
4170 SCTP_TCB_UNLOCK(stcb);
4171 SCTP_SOCKET_LOCK(so, 1);
4172 SCTP_TCB_LOCK(stcb);
4173 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4174 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4175 /* assoc was freed while we were unlocked */
4176 SCTP_SOCKET_UNLOCK(so, 1);
4177 return;
4178 }
4179#endif
4180 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
tuexen6bffa9a2012-06-25 17:40:03 +00004181#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004182 SCTP_SOCKET_UNLOCK(so, 1);
4183#endif
4184 } else {
4185 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004186 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004187 }
4188 }
4189
4190 /* JRS - Use the congestion control given in the CC module */
4191 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4192 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4193 if (net->net_ack2 > 0) {
4194 /*
4195 * Karn's rule applies to clearing error count, this
4196 * is optional.
4197 */
4198 net->error_count = 0;
4199 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4200 /* addr came good */
4201 net->dest_state |= SCTP_ADDR_REACHABLE;
4202 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
tuexenda53ff02012-05-14 09:00:59 +00004203 0, (void *)net, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00004204 }
4205 if (net == stcb->asoc.primary_destination) {
4206 if (stcb->asoc.alternate) {
4207 /* release the alternate, primary is good */
4208 sctp_free_remote_addr(stcb->asoc.alternate);
4209 stcb->asoc.alternate = NULL;
4210 }
4211 }
4212 if (net->dest_state & SCTP_ADDR_PF) {
4213 net->dest_state &= ~SCTP_ADDR_PF;
t00fcxen0057a6d2015-05-28 16:42:49 +00004214 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4215 stcb->sctp_ep, stcb, net,
Michael Tuexene5001952016-04-17 19:25:27 +02004216 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
tuexendd729232011-11-01 23:04:43 +00004217 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4218 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4219 /* Done with this net */
4220 net->net_ack = 0;
4221 }
4222 /* restore any doubled timers */
4223 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4224 if (net->RTO < stcb->asoc.minrto) {
4225 net->RTO = stcb->asoc.minrto;
4226 }
4227 if (net->RTO > stcb->asoc.maxrto) {
4228 net->RTO = stcb->asoc.maxrto;
4229 }
4230 }
4231 }
4232 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4233 }
4234 asoc->last_acked_seq = cumack;
4235
4236 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4237 /* nothing left in-flight */
4238 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4239 net->flight_size = 0;
4240 net->partial_bytes_acked = 0;
4241 }
4242 asoc->total_flight = 0;
4243 asoc->total_flight_count = 0;
4244 }
4245
4246 /* RWND update */
4247 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4248 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4249 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4250 /* SWS sender side engages */
4251 asoc->peers_rwnd = 0;
4252 }
4253 if (asoc->peers_rwnd > old_rwnd) {
4254 win_probe_recovery = 1;
4255 }
4256 /* Now assure a timer where data is queued at */
4257again:
4258 j = 0;
4259 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
tuexendd729232011-11-01 23:04:43 +00004260 if (win_probe_recovery && (net->window_probe)) {
4261 win_probe_recovered = 1;
4262 /*
4263 * Find first chunk that was used with window probe
4264 * and clear the sent
4265 */
4266 /* sa_ignore FREED_MEMORY */
4267 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4268 if (tp1->window_probe) {
4269 /* move back to data send queue */
tuexen9784e9a2011-12-18 13:04:23 +00004270 sctp_window_probe_recovery(stcb, asoc, tp1);
tuexendd729232011-11-01 23:04:43 +00004271 break;
4272 }
4273 }
4274 }
tuexendd729232011-11-01 23:04:43 +00004275 if (net->flight_size) {
4276 j++;
Michael Tuexena7360a12017-09-17 11:30:34 +02004277 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
tuexendd729232011-11-01 23:04:43 +00004278 if (net->window_probe) {
4279 net->window_probe = 0;
4280 }
4281 } else {
4282 if (net->window_probe) {
4283 /* In window probes we must assure a timer is still running there */
4284 net->window_probe = 0;
4285 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
Michael Tuexena7360a12017-09-17 11:30:34 +02004286 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
tuexendd729232011-11-01 23:04:43 +00004287 }
4288 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4289 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4290 stcb, net,
Michael Tuexene5001952016-04-17 19:25:27 +02004291 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
tuexendd729232011-11-01 23:04:43 +00004292 }
4293 }
4294 }
4295 if ((j == 0) &&
4296 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4297 (asoc->sent_queue_retran_cnt == 0) &&
4298 (win_probe_recovered == 0) &&
4299 (done_once == 0)) {
4300 /* huh, this should not happen unless all packets
4301 * are PR-SCTP and marked to skip of course.
4302 */
4303 if (sctp_fs_audit(asoc)) {
4304 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4305 net->flight_size = 0;
4306 }
4307 asoc->total_flight = 0;
4308 asoc->total_flight_count = 0;
4309 asoc->sent_queue_retran_cnt = 0;
4310 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4311 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4312 sctp_flight_size_increase(tp1);
4313 sctp_total_flight_increase(stcb, tp1);
4314 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4315 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4316 }
4317 }
4318 }
4319 done_once = 1;
4320 goto again;
4321 }
4322 /**********************************/
4323 /* Now what about shutdown issues */
4324 /**********************************/
4325 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4326 /* nothing left on sendqueue.. consider done */
4327 /* clean up */
4328 if ((asoc->stream_queue_cnt == 1) &&
4329 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02004330 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexenfdcf7902016-08-06 14:39:31 +02004331 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
Michael Tuexen348a36c2018-08-13 16:24:47 +02004332 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
tuexendd729232011-11-01 23:04:43 +00004333 }
Michael Tuexen74842cb2017-07-20 13:15:46 +02004334 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02004335 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexen74842cb2017-07-20 13:15:46 +02004336 (asoc->stream_queue_cnt == 1) &&
4337 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4338 struct mbuf *op_err;
4339
4340 *abort_now = 1;
4341 /* XXX */
4342 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4343 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4344 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4345 return;
4346 }
tuexendd729232011-11-01 23:04:43 +00004347 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4348 (asoc->stream_queue_cnt == 0)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02004349 struct sctp_nets *netp;
tuexendd729232011-11-01 23:04:43 +00004350
Michael Tuexen348a36c2018-08-13 16:24:47 +02004351 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4352 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02004353 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
tuexendd729232011-11-01 23:04:43 +00004354 }
Michael Tuexen348a36c2018-08-13 16:24:47 +02004355 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
Michael Tuexen74842cb2017-07-20 13:15:46 +02004356 sctp_stop_timers_for_shutdown(stcb);
4357 if (asoc->alternate) {
4358 netp = asoc->alternate;
4359 } else {
4360 netp = asoc->primary_destination;
4361 }
4362 sctp_send_shutdown(stcb, netp);
4363 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4364 stcb->sctp_ep, stcb, netp);
4365 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4366 stcb->sctp_ep, stcb, netp);
Michael Tuexen348a36c2018-08-13 16:24:47 +02004367 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
tuexendd729232011-11-01 23:04:43 +00004368 (asoc->stream_queue_cnt == 0)) {
4369 struct sctp_nets *netp;
t00fcxend0ad16b2013-02-09 18:34:24 +00004370
tuexendd729232011-11-01 23:04:43 +00004371 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
Michael Tuexen348a36c2018-08-13 16:24:47 +02004372 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
tuexendd729232011-11-01 23:04:43 +00004373 sctp_stop_timers_for_shutdown(stcb);
t00fcxend0ad16b2013-02-09 18:34:24 +00004374 if (asoc->alternate) {
4375 netp = asoc->alternate;
4376 } else {
4377 netp = asoc->primary_destination;
4378 }
4379 sctp_send_shutdown_ack(stcb, netp);
tuexendd729232011-11-01 23:04:43 +00004380 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4381 stcb->sctp_ep, stcb, netp);
4382 }
4383 }
4384 /*********************************************/
4385 /* Here we perform PR-SCTP procedures */
4386 /* (section 4.2) */
4387 /*********************************************/
4388 /* C1. update advancedPeerAckPoint */
4389 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4390 asoc->advanced_peer_ack_point = cumack;
4391 }
4392 /* PR-Sctp issues need to be addressed too */
t00fcxen0e78cef2014-08-02 22:05:33 +00004393 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
tuexendd729232011-11-01 23:04:43 +00004394 struct sctp_tmit_chunk *lchk;
4395 uint32_t old_adv_peer_ack_point;
tuexen15f99d82012-04-19 16:08:38 +00004396
tuexendd729232011-11-01 23:04:43 +00004397 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4398 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4399 /* C3. See if we need to send a Fwd-TSN */
4400 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4401 /*
4402 * ISSUE with ECN, see FWD-TSN processing.
4403 */
4404 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4405 send_forward_tsn(stcb, asoc);
4406 } else if (lchk) {
4407 /* try to FR fwd-tsn's that get lost too */
4408 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4409 send_forward_tsn(stcb, asoc);
4410 }
4411 }
4412 }
4413 if (lchk) {
4414 /* Assure a timer is up */
4415 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4416 stcb->sctp_ep, stcb, lchk->whoTo);
4417 }
4418 }
4419 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4420 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4421 rwnd,
4422 stcb->asoc.peers_rwnd,
4423 stcb->asoc.total_flight,
4424 stcb->asoc.total_output_queue_size);
4425 }
4426}
4427
4428void
4429sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
tuexen9784e9a2011-12-18 13:04:23 +00004430 struct sctp_tcb *stcb,
tuexendd729232011-11-01 23:04:43 +00004431 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4432 int *abort_now, uint8_t flags,
4433 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4434{
4435 struct sctp_association *asoc;
4436 struct sctp_tmit_chunk *tp1, *tp2;
4437 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
tuexendd729232011-11-01 23:04:43 +00004438 uint16_t wake_him = 0;
4439 uint32_t send_s = 0;
4440 long j;
4441 int accum_moved = 0;
4442 int will_exit_fast_recovery = 0;
4443 uint32_t a_rwnd, old_rwnd;
4444 int win_probe_recovery = 0;
4445 int win_probe_recovered = 0;
4446 struct sctp_nets *net = NULL;
tuexendd729232011-11-01 23:04:43 +00004447 int done_once;
tuexen63fc0bb2011-12-27 12:24:52 +00004448 int rto_ok = 1;
tuexendd729232011-11-01 23:04:43 +00004449 uint8_t reneged_all = 0;
4450 uint8_t cmt_dac_flag;
4451 /*
4452 * we take any chance we can to service our queues since we cannot
4453 * get awoken when the socket is read from :<
4454 */
4455 /*
4456 * Now perform the actual SACK handling: 1) Verify that it is not an
4457 * old sack, if so discard. 2) If there is nothing left in the send
4458 * queue (cum-ack is equal to last acked) then you have a duplicate
4459 * too, update any rwnd change and verify no timers are running.
 4460 * then return. 3) Process any new consecutive data i.e. cum-ack
4461 * moved process these first and note that it moved. 4) Process any
4462 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4463 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4464 * sync up flightsizes and things, stop all timers and also check
4465 * for shutdown_pending state. If so then go ahead and send off the
4466 * shutdown. If in shutdown recv, send off the shutdown-ack and
4467 * start that timer, Ret. 9) Strike any non-acked things and do FR
4468 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4469 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4470 * if in shutdown_recv state.
4471 */
4472 SCTP_TCB_LOCK_ASSERT(stcb);
4473 /* CMT DAC algo */
4474 this_sack_lowest_newack = 0;
tuexendd729232011-11-01 23:04:43 +00004475 SCTP_STAT_INCR(sctps_slowpath_sack);
4476 last_tsn = cum_ack;
4477 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4478#ifdef SCTP_ASOCLOG_OF_TSNS
4479 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4480 stcb->asoc.cumack_log_at++;
4481 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4482 stcb->asoc.cumack_log_at = 0;
4483 }
4484#endif
4485 a_rwnd = rwnd;
4486
4487 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4488 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4489 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4490 }
4491
4492 old_rwnd = stcb->asoc.peers_rwnd;
4493 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4494 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4495 stcb->asoc.overall_error_count,
4496 0,
4497 SCTP_FROM_SCTP_INDATA,
4498 __LINE__);
4499 }
4500 stcb->asoc.overall_error_count = 0;
4501 asoc = &stcb->asoc;
4502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4503 sctp_log_sack(asoc->last_acked_seq,
4504 cum_ack,
4505 0,
4506 num_seg,
4507 num_dup,
4508 SCTP_LOG_NEW_SACK);
4509 }
4510 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4511 uint16_t i;
4512 uint32_t *dupdata, dblock;
4513
4514 for (i = 0; i < num_dup; i++) {
4515 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4516 sizeof(uint32_t), (uint8_t *)&dblock);
4517 if (dupdata == NULL) {
4518 break;
4519 }
4520 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4521 }
4522 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004523 /* reality check */
4524 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4525 tp1 = TAILQ_LAST(&asoc->sent_queue,
4526 sctpchunk_listhead);
Michael Tuexen00657ac2016-12-07 21:53:26 +01004527 send_s = tp1->rec.data.tsn + 1;
Michael Tuexen0ec21502016-05-12 18:39:01 +02004528 } else {
4529 tp1 = NULL;
4530 send_s = asoc->sending_seq;
4531 }
4532 if (SCTP_TSN_GE(cum_ack, send_s)) {
4533 struct mbuf *op_err;
4534 char msg[SCTP_DIAG_INFO_LEN];
t00fcxen08f9ff92014-03-16 13:38:54 +00004535
Michael Tuexen0ec21502016-05-12 18:39:01 +02004536 /*
4537 * no way, we have not even sent this TSN out yet.
4538 * Peer is hopelessly messed up with us.
4539 */
4540 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4541 cum_ack, send_s);
4542 if (tp1) {
4543 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
Michael Tuexen00657ac2016-12-07 21:53:26 +01004544 tp1->rec.data.tsn, (void *)tp1);
tuexendd729232011-11-01 23:04:43 +00004545 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004546 hopeless_peer:
4547 *abort_now = 1;
4548 /* XXX */
4549 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4550 cum_ack, send_s);
4551 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4552 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4553 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4554 return;
tuexendd729232011-11-01 23:04:43 +00004555 }
4556 /**********************/
4557 /* 1) check the range */
4558 /**********************/
4559 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4560 /* acking something behind */
4561 return;
4562 }
tuexendd729232011-11-01 23:04:43 +00004563
4564 /* update the Rwnd of the peer */
4565 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4566 TAILQ_EMPTY(&asoc->send_queue) &&
4567 (asoc->stream_queue_cnt == 0)) {
4568 /* nothing left on send/sent and strmq */
4569 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4570 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4571 asoc->peers_rwnd, 0, 0, a_rwnd);
4572 }
4573 asoc->peers_rwnd = a_rwnd;
4574 if (asoc->sent_queue_retran_cnt) {
4575 asoc->sent_queue_retran_cnt = 0;
4576 }
4577 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4578 /* SWS sender side engages */
4579 asoc->peers_rwnd = 0;
4580 }
4581 /* stop any timers */
4582 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4583 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
Michael Tuexene5001952016-04-17 19:25:27 +02004584 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
tuexendd729232011-11-01 23:04:43 +00004585 net->partial_bytes_acked = 0;
4586 net->flight_size = 0;
4587 }
4588 asoc->total_flight = 0;
4589 asoc->total_flight_count = 0;
4590 return;
4591 }
4592 /*
4593 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4594 * things. The total byte count acked is tracked in netAckSz AND
 4595 * netAck2 is used to track the total bytes acked that are un-
 4596 * ambiguous and were never retransmitted. We track these on a per
4597 * destination address basis.
4598 */
4599 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4600 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4601 /* Drag along the window_tsn for cwr's */
4602 net->cwr_window_tsn = cum_ack;
4603 }
4604 net->prev_cwnd = net->cwnd;
4605 net->net_ack = 0;
4606 net->net_ack2 = 0;
4607
4608 /*
4609 * CMT: Reset CUC and Fast recovery algo variables before
4610 * SACK processing
4611 */
4612 net->new_pseudo_cumack = 0;
4613 net->will_exit_fast_recovery = 0;
4614 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4615 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4616 }
Michael Tuexen83714a82018-01-16 23:02:09 +01004617
4618 /*
4619 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4620 * to be greater than the cumack. Also reset saw_newack to 0
4621 * for all dests.
4622 */
4623 net->saw_newack = 0;
4624 net->this_sack_highest_newack = last_tsn;
tuexendd729232011-11-01 23:04:43 +00004625 }
4626 /* process the new consecutive TSN first */
4627 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004628 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
tuexendd729232011-11-01 23:04:43 +00004629 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4630 accum_moved = 1;
4631 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4632 /*
4633 * If it is less than ACKED, it is
4634 * now no-longer in flight. Higher
4635 * values may occur during marking
4636 */
4637 if ((tp1->whoTo->dest_state &
4638 SCTP_ADDR_UNCONFIRMED) &&
4639 (tp1->snd_count < 2)) {
4640 /*
4641 * If there was no retran
4642 * and the address is
4643 * un-confirmed and we sent
4644 * there and are now
4645 * sacked.. its confirmed,
4646 * mark it so.
4647 */
4648 tp1->whoTo->dest_state &=
4649 ~SCTP_ADDR_UNCONFIRMED;
4650 }
4651 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4652 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4653 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4654 tp1->whoTo->flight_size,
4655 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004656 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004657 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004658 }
4659 sctp_flight_size_decrease(tp1);
4660 sctp_total_flight_decrease(stcb, tp1);
4661 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4662 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4663 tp1);
4664 }
4665 }
4666 tp1->whoTo->net_ack += tp1->send_size;
4667
4668 /* CMT SFR and DAC algos */
Michael Tuexen00657ac2016-12-07 21:53:26 +01004669 this_sack_lowest_newack = tp1->rec.data.tsn;
tuexendd729232011-11-01 23:04:43 +00004670 tp1->whoTo->saw_newack = 1;
4671
4672 if (tp1->snd_count < 2) {
4673 /*
Michael Tuexenc51af972018-08-12 15:32:55 +02004674 * True non-retransmitted
tuexendd729232011-11-01 23:04:43 +00004675 * chunk
4676 */
4677 tp1->whoTo->net_ack2 +=
4678 tp1->send_size;
4679
4680 /* update RTO too? */
4681 if (tp1->do_rtt) {
4682 if (rto_ok) {
4683 tp1->whoTo->RTO =
4684 sctp_calculate_rto(stcb,
4685 asoc, tp1->whoTo,
4686 &tp1->sent_rcv_time,
tuexendd729232011-11-01 23:04:43 +00004687 SCTP_RTT_FROM_DATA);
4688 rto_ok = 0;
4689 }
4690 if (tp1->whoTo->rto_needed == 0) {
4691 tp1->whoTo->rto_needed = 1;
4692 }
4693 tp1->do_rtt = 0;
4694 }
4695 }
4696 /*
4697 * CMT: CUCv2 algorithm. From the
4698 * cumack'd TSNs, for each TSN being
4699 * acked for the first time, set the
4700 * following variables for the
4701 * corresp destination.
4702 * new_pseudo_cumack will trigger a
4703 * cwnd update.
4704 * find_(rtx_)pseudo_cumack will
4705 * trigger search for the next
4706 * expected (rtx-)pseudo-cumack.
4707 */
4708 tp1->whoTo->new_pseudo_cumack = 1;
4709 tp1->whoTo->find_pseudo_cumack = 1;
4710 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4711
4712
4713 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4714 sctp_log_sack(asoc->last_acked_seq,
4715 cum_ack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004716 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004717 0,
4718 0,
4719 SCTP_LOG_TSN_ACKED);
4720 }
4721 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004722 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004723 }
4724 }
4725 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4726 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4727#ifdef SCTP_AUDITING_ENABLED
4728 sctp_audit_log(0xB3,
4729 (asoc->sent_queue_retran_cnt & 0x000000ff));
4730#endif
4731 }
4732 if (tp1->rec.data.chunk_was_revoked) {
4733 /* deflate the cwnd */
4734 tp1->whoTo->cwnd -= tp1->book_size;
4735 tp1->rec.data.chunk_was_revoked = 0;
4736 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004737 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4738 tp1->sent = SCTP_DATAGRAM_ACKED;
4739 }
tuexendd729232011-11-01 23:04:43 +00004740 }
4741 } else {
4742 break;
4743 }
4744 }
4745 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4746 /* always set this up to cum-ack */
4747 asoc->this_sack_highest_gap = last_tsn;
4748
4749 if ((num_seg > 0) || (num_nr_seg > 0)) {
4750
4751 /*
tuexendd729232011-11-01 23:04:43 +00004752 * thisSackHighestGap will increase while handling NEW
4753 * segments this_sack_highest_newack will increase while
4754 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4755 * used for CMT DAC algo. saw_newack will also change.
4756 */
4757 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4758 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
tuexen9784e9a2011-12-18 13:04:23 +00004759 num_seg, num_nr_seg, &rto_ok)) {
tuexendd729232011-11-01 23:04:43 +00004760 wake_him++;
4761 }
Michael Tuexen0ec21502016-05-12 18:39:01 +02004762 /*
4763 * validate the biggest_tsn_acked in the gap acks if
4764 * strict adherence is wanted.
4765 */
4766 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
tuexendd729232011-11-01 23:04:43 +00004767 /*
Michael Tuexen0ec21502016-05-12 18:39:01 +02004768 * peer is either confused or we are under
4769 * attack. We must abort.
tuexendd729232011-11-01 23:04:43 +00004770 */
Michael Tuexen0ec21502016-05-12 18:39:01 +02004771 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4772 biggest_tsn_acked, send_s);
4773 goto hopeless_peer;
tuexendd729232011-11-01 23:04:43 +00004774 }
4775 }
4776 /*******************************************/
4777 /* cancel ALL T3-send timer if accum moved */
4778 /*******************************************/
4779 if (asoc->sctp_cmt_on_off > 0) {
4780 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4781 if (net->new_pseudo_cumack)
4782 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4783 stcb, net,
Michael Tuexene5001952016-04-17 19:25:27 +02004784 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
tuexendd729232011-11-01 23:04:43 +00004785
4786 }
4787 } else {
4788 if (accum_moved) {
4789 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4790 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
Michael Tuexene5001952016-04-17 19:25:27 +02004791 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
tuexendd729232011-11-01 23:04:43 +00004792 }
4793 }
4794 }
4795 /********************************************/
4796 /* drop the acked chunks from the sentqueue */
4797 /********************************************/
4798 asoc->last_acked_seq = cum_ack;
4799
4800 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004801 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
tuexendd729232011-11-01 23:04:43 +00004802 break;
4803 }
t00fcxen8fcc5142012-11-16 19:46:12 +00004804 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004805 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4806 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
t00fcxen9ad90772012-11-07 22:19:57 +00004807#ifdef INVARIANTS
4808 } else {
Michael Tuexen00657ac2016-12-07 21:53:26 +01004809 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
t00fcxen9ad90772012-11-07 22:19:57 +00004810#endif
4811 }
tuexendd729232011-11-01 23:04:43 +00004812 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01004813 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4814 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4815 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
Michael Tuexenc0a12d12015-12-03 16:30:24 +01004816 asoc->trigger_reset = 1;
4817 }
tuexendd729232011-11-01 23:04:43 +00004818 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
t00fcxen91ceb732013-09-03 19:40:11 +00004819 if (PR_SCTP_ENABLED(tp1->flags)) {
tuexendd729232011-11-01 23:04:43 +00004820 if (asoc->pr_sctp_cnt != 0)
4821 asoc->pr_sctp_cnt--;
4822 }
4823 asoc->sent_queue_cnt--;
4824 if (tp1->data) {
4825 /* sa_ignore NO_NULL_CHK */
4826 sctp_free_bufspace(stcb, asoc, tp1, 1);
4827 sctp_m_freem(tp1->data);
4828 tp1->data = NULL;
t00fcxen0e78cef2014-08-02 22:05:33 +00004829 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
tuexendd729232011-11-01 23:04:43 +00004830 asoc->sent_queue_cnt_removeable--;
4831 }
4832 }
4833 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4834 sctp_log_sack(asoc->last_acked_seq,
4835 cum_ack,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004836 tp1->rec.data.tsn,
tuexendd729232011-11-01 23:04:43 +00004837 0,
4838 0,
4839 SCTP_LOG_FREE_SENT);
4840 }
4841 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4842 wake_him++;
4843 }
4844 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4845#ifdef INVARIANTS
Michael Tuexen34488e72016-05-03 22:11:59 +02004846 panic("Warning flight size is positive and should be 0");
tuexendd729232011-11-01 23:04:43 +00004847#else
4848 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4849 asoc->total_flight);
4850#endif
4851 asoc->total_flight = 0;
4852 }
4853
tuexen98456cf2012-04-19 15:37:07 +00004854#if defined(__Userspace__)
4855 if (stcb->sctp_ep->recv_callback) {
4856 if (stcb->sctp_socket) {
4857 uint32_t inqueue_bytes, sb_free_now;
4858 struct sctp_inpcb *inp;
tuexen749d8562011-11-13 13:41:49 +00004859
tuexen98456cf2012-04-19 15:37:07 +00004860 inp = stcb->sctp_ep;
tuexen749d8562011-11-13 13:41:49 +00004861 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
tuexen1ee04c82012-04-19 16:35:13 +00004862 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
tuexen98456cf2012-04-19 15:37:07 +00004863
4864 /* check if the amount free in the send socket buffer crossed the threshold */
4865 if (inp->send_callback &&
4866 (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4867 (inp->send_sb_threshold == 0))) {
4868 atomic_add_int(&stcb->asoc.refcnt, 1);
4869 SCTP_TCB_UNLOCK(stcb);
4870 inp->send_callback(stcb->sctp_socket, sb_free_now);
4871 SCTP_TCB_LOCK(stcb);
4872 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4873 }
tuexen749d8562011-11-13 13:41:49 +00004874 }
tuexen98456cf2012-04-19 15:37:07 +00004875 } else if ((wake_him) && (stcb->sctp_socket)) {
tuexen749d8562011-11-13 13:41:49 +00004876#else
tuexendd729232011-11-01 23:04:43 +00004877 /* sa_ignore NO_NULL_CHK */
4878 if ((wake_him) && (stcb->sctp_socket)) {
tuexen98456cf2012-04-19 15:37:07 +00004879#endif
tuexen6bffa9a2012-06-25 17:40:03 +00004880#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004881 struct socket *so;
4882
4883#endif
4884 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004886 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004887 }
tuexen6bffa9a2012-06-25 17:40:03 +00004888#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004889 so = SCTP_INP_SO(stcb->sctp_ep);
4890 atomic_add_int(&stcb->asoc.refcnt, 1);
4891 SCTP_TCB_UNLOCK(stcb);
4892 SCTP_SOCKET_LOCK(so, 1);
4893 SCTP_TCB_LOCK(stcb);
4894 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4895 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4896 /* assoc was freed while we were unlocked */
4897 SCTP_SOCKET_UNLOCK(so, 1);
4898 return;
4899 }
4900#endif
4901 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
tuexen6bffa9a2012-06-25 17:40:03 +00004902#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
tuexendd729232011-11-01 23:04:43 +00004903 SCTP_SOCKET_UNLOCK(so, 1);
4904#endif
4905 } else {
4906 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
tuexen9784e9a2011-12-18 13:04:23 +00004907 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
tuexendd729232011-11-01 23:04:43 +00004908 }
4909 }
tuexendd729232011-11-01 23:04:43 +00004910
4911 if (asoc->fast_retran_loss_recovery && accum_moved) {
4912 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4913 /* Setup so we will exit RFC2582 fast recovery */
4914 will_exit_fast_recovery = 1;
4915 }
4916 }
4917 /*
4918 * Check for revoked fragments:
4919 *
4920 * if Previous sack - Had no frags then we can't have any revoked if
4921 * Previous sack - Had frag's then - If we now have frags aka
4922 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4923 * some of them. else - The peer revoked all ACKED fragments, since
4924 * we had some before and now we have NONE.
4925 */
4926
4927 if (num_seg) {
4928 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4929 asoc->saw_sack_with_frags = 1;
4930 } else if (asoc->saw_sack_with_frags) {
4931 int cnt_revoked = 0;
4932
4933 /* Peer revoked all dg's marked or acked */
4934 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4935 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4936 tp1->sent = SCTP_DATAGRAM_SENT;
4937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4938 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4939 tp1->whoTo->flight_size,
4940 tp1->book_size,
Michael Tuexen95da8e52016-03-25 12:29:52 +01004941 (uint32_t)(uintptr_t)tp1->whoTo,
Michael Tuexen00657ac2016-12-07 21:53:26 +01004942 tp1->rec.data.tsn);
tuexendd729232011-11-01 23:04:43 +00004943 }
4944 sctp_flight_size_increase(tp1);
4945 sctp_total_flight_increase(stcb, tp1);
4946 tp1->rec.data.chunk_was_revoked = 1;
4947 /*
4948 * To ensure that this increase in
4949 * flightsize, which is artificial,
4950 * does not throttle the sender, we
4951 * also increase the cwnd
4952 * artificially.
4953 */
4954 tp1->whoTo->cwnd += tp1->book_size;
4955 cnt_revoked++;
4956 }
4957 }
4958 if (cnt_revoked) {
4959 reneged_all = 1;
4960 }
4961 asoc->saw_sack_with_frags = 0;
4962 }
4963 if (num_nr_seg > 0)
4964 asoc->saw_sack_with_nr_frags = 1;
4965 else
4966 asoc->saw_sack_with_nr_frags = 0;
4967
4968 /* JRS - Use the congestion control given in the CC module */
4969 if (ecne_seen == 0) {
4970 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4971 if (net->net_ack2 > 0) {
4972 /*
4973 * Karn's rule applies to clearing error count, this
4974 * is optional.
4975 */
4976 net->error_count = 0;
4977 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4978 /* addr came good */
4979 net->dest_state |= SCTP_ADDR_REACHABLE;
4980 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
tuexenda53ff02012-05-14 09:00:59 +00004981 0, (void *)net, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00004982 }
4983
4984 if (net == stcb->asoc.primary_destination) {
4985 if (stcb->asoc.alternate) {
4986 /* release the alternate, primary is good */
4987 sctp_free_remote_addr(stcb->asoc.alternate);
4988 stcb->asoc.alternate = NULL;
4989 }
4990 }
4991
4992 if (net->dest_state & SCTP_ADDR_PF) {
4993 net->dest_state &= ~SCTP_ADDR_PF;
t00fcxen0057a6d2015-05-28 16:42:49 +00004994 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4995 stcb->sctp_ep, stcb, net,
Michael Tuexene5001952016-04-17 19:25:27 +02004996 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
tuexendd729232011-11-01 23:04:43 +00004997 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4998 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4999 /* Done with this net */
5000 net->net_ack = 0;
5001 }
5002 /* restore any doubled timers */
5003 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5004 if (net->RTO < stcb->asoc.minrto) {
5005 net->RTO = stcb->asoc.minrto;
5006 }
5007 if (net->RTO > stcb->asoc.maxrto) {
5008 net->RTO = stcb->asoc.maxrto;
5009 }
5010 }
5011 }
5012 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5013 }
5014
5015 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5016 /* nothing left in-flight */
5017 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5018 /* stop all timers */
5019 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
t00fcxen0057a6d2015-05-28 16:42:49 +00005020 stcb, net,
Michael Tuexene5001952016-04-17 19:25:27 +02005021 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
tuexendd729232011-11-01 23:04:43 +00005022 net->flight_size = 0;
5023 net->partial_bytes_acked = 0;
5024 }
5025 asoc->total_flight = 0;
5026 asoc->total_flight_count = 0;
5027 }
5028
5029 /**********************************/
5030 /* Now what about shutdown issues */
5031 /**********************************/
5032 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5033 /* nothing left on sendqueue.. consider done */
5034 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5035 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5036 asoc->peers_rwnd, 0, 0, a_rwnd);
5037 }
5038 asoc->peers_rwnd = a_rwnd;
5039 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5040 /* SWS sender side engages */
5041 asoc->peers_rwnd = 0;
5042 }
5043 /* clean up */
5044 if ((asoc->stream_queue_cnt == 1) &&
5045 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02005046 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005047 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
Michael Tuexen348a36c2018-08-13 16:24:47 +02005048 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
tuexendd729232011-11-01 23:04:43 +00005049 }
Michael Tuexen74842cb2017-07-20 13:15:46 +02005050 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
Michael Tuexen348a36c2018-08-13 16:24:47 +02005051 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
Michael Tuexen74842cb2017-07-20 13:15:46 +02005052 (asoc->stream_queue_cnt == 1) &&
5053 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5054 struct mbuf *op_err;
5055
5056 *abort_now = 1;
5057 /* XXX */
5058 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5059 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5060 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5061 return;
5062 }
tuexendd729232011-11-01 23:04:43 +00005063 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5064 (asoc->stream_queue_cnt == 0)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02005065 struct sctp_nets *netp;
t00fcxen08f9ff92014-03-16 13:38:54 +00005066
Michael Tuexen348a36c2018-08-13 16:24:47 +02005067 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5068 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
Michael Tuexen74842cb2017-07-20 13:15:46 +02005069 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
tuexendd729232011-11-01 23:04:43 +00005070 }
Michael Tuexen348a36c2018-08-13 16:24:47 +02005071 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
Michael Tuexen74842cb2017-07-20 13:15:46 +02005072 sctp_stop_timers_for_shutdown(stcb);
5073 if (asoc->alternate) {
5074 netp = asoc->alternate;
5075 } else {
5076 netp = asoc->primary_destination;
5077 }
5078 sctp_send_shutdown(stcb, netp);
5079 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5080 stcb->sctp_ep, stcb, netp);
5081 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5082 stcb->sctp_ep, stcb, netp);
tuexendd729232011-11-01 23:04:43 +00005083 return;
Michael Tuexen348a36c2018-08-13 16:24:47 +02005084 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
tuexendd729232011-11-01 23:04:43 +00005085 (asoc->stream_queue_cnt == 0)) {
5086 struct sctp_nets *netp;
t00fcxend0ad16b2013-02-09 18:34:24 +00005087
tuexendd729232011-11-01 23:04:43 +00005088 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
Michael Tuexen348a36c2018-08-13 16:24:47 +02005089 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
tuexendd729232011-11-01 23:04:43 +00005090 sctp_stop_timers_for_shutdown(stcb);
t00fcxend0ad16b2013-02-09 18:34:24 +00005091 if (asoc->alternate) {
5092 netp = asoc->alternate;
5093 } else {
5094 netp = asoc->primary_destination;
5095 }
5096 sctp_send_shutdown_ack(stcb, netp);
tuexendd729232011-11-01 23:04:43 +00005097 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5098 stcb->sctp_ep, stcb, netp);
5099 return;
5100 }
5101 }
5102 /*
5103 * Now here we are going to recycle net_ack for a different use...
5104 * HEADS UP.
5105 */
5106 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5107 net->net_ack = 0;
5108 }
5109
5110 /*
5111 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5112 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5113 * automatically ensure that.
5114 */
5115 if ((asoc->sctp_cmt_on_off > 0) &&
5116 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5117 (cmt_dac_flag == 0)) {
5118 this_sack_lowest_newack = cum_ack;
5119 }
5120 if ((num_seg > 0) || (num_nr_seg > 0)) {
5121 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5122 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5123 }
5124 /* JRS - Use the congestion control given in the CC module */
5125 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5126
5127 /* Now are we exiting loss recovery ? */
5128 if (will_exit_fast_recovery) {
5129 /* Ok, we must exit fast recovery */
5130 asoc->fast_retran_loss_recovery = 0;
5131 }
5132 if ((asoc->sat_t3_loss_recovery) &&
5133 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5134 /* end satellite t3 loss recovery */
5135 asoc->sat_t3_loss_recovery = 0;
5136 }
5137 /*
5138 * CMT Fast recovery
5139 */
5140 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5141 if (net->will_exit_fast_recovery) {
5142 /* Ok, we must exit fast recovery */
5143 net->fast_retran_loss_recovery = 0;
5144 }
5145 }
5146
5147 /* Adjust and set the new rwnd value */
5148 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5149 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5150 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5151 }
5152 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5153 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5154 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5155 /* SWS sender side engages */
5156 asoc->peers_rwnd = 0;
5157 }
5158 if (asoc->peers_rwnd > old_rwnd) {
5159 win_probe_recovery = 1;
5160 }
5161
5162 /*
5163 * Now we must setup so we have a timer up for anyone with
5164 * outstanding data.
5165 */
5166 done_once = 0;
5167again:
5168 j = 0;
5169 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5170 if (win_probe_recovery && (net->window_probe)) {
5171 win_probe_recovered = 1;
5172 /*-
5173 * Find first chunk that was used with
5174 * window probe and clear the event. Put
5175 * it back into the send queue as if has
5176 * not been sent.
5177 */
5178 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5179 if (tp1->window_probe) {
tuexen9784e9a2011-12-18 13:04:23 +00005180 sctp_window_probe_recovery(stcb, asoc, tp1);
tuexendd729232011-11-01 23:04:43 +00005181 break;
5182 }
5183 }
5184 }
5185 if (net->flight_size) {
5186 j++;
5187 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5188 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5189 stcb->sctp_ep, stcb, net);
5190 }
5191 if (net->window_probe) {
5192 net->window_probe = 0;
5193 }
5194 } else {
5195 if (net->window_probe) {
5196 /* In window probes we must assure a timer is still running there */
5197 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5198 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5199 stcb->sctp_ep, stcb, net);
5200
5201 }
5202 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5203 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5204 stcb, net,
Michael Tuexene5001952016-04-17 19:25:27 +02005205 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
tuexendd729232011-11-01 23:04:43 +00005206 }
5207 }
5208 }
5209 if ((j == 0) &&
5210 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5211 (asoc->sent_queue_retran_cnt == 0) &&
5212 (win_probe_recovered == 0) &&
5213 (done_once == 0)) {
5214 /* huh, this should not happen unless all packets
5215 * are PR-SCTP and marked to skip of course.
5216 */
5217 if (sctp_fs_audit(asoc)) {
5218 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5219 net->flight_size = 0;
5220 }
5221 asoc->total_flight = 0;
5222 asoc->total_flight_count = 0;
5223 asoc->sent_queue_retran_cnt = 0;
5224 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5225 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5226 sctp_flight_size_increase(tp1);
5227 sctp_total_flight_increase(stcb, tp1);
5228 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5229 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5230 }
5231 }
5232 }
5233 done_once = 1;
5234 goto again;
5235 }
5236 /*********************************************/
5237 /* Here we perform PR-SCTP procedures */
5238 /* (section 4.2) */
5239 /*********************************************/
5240 /* C1. update advancedPeerAckPoint */
5241 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5242 asoc->advanced_peer_ack_point = cum_ack;
5243 }
5244 /* C2. try to further move advancedPeerAckPoint ahead */
t00fcxen0e78cef2014-08-02 22:05:33 +00005245 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
tuexendd729232011-11-01 23:04:43 +00005246 struct sctp_tmit_chunk *lchk;
5247 uint32_t old_adv_peer_ack_point;
tuexen15f99d82012-04-19 16:08:38 +00005248
tuexendd729232011-11-01 23:04:43 +00005249 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5250 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5251 /* C3. See if we need to send a Fwd-TSN */
5252 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5253 /*
5254 * ISSUE with ECN, see FWD-TSN processing.
5255 */
5256 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5257 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5258 0xee, cum_ack, asoc->advanced_peer_ack_point,
5259 old_adv_peer_ack_point);
5260 }
5261 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5262 send_forward_tsn(stcb, asoc);
5263 } else if (lchk) {
5264 /* try to FR fwd-tsn's that get lost too */
5265 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5266 send_forward_tsn(stcb, asoc);
5267 }
5268 }
5269 }
5270 if (lchk) {
5271 /* Assure a timer is up */
5272 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5273 stcb->sctp_ep, stcb, lchk->whoTo);
5274 }
5275 }
5276 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5277 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5278 a_rwnd,
5279 stcb->asoc.peers_rwnd,
5280 stcb->asoc.total_flight,
5281 stcb->asoc.total_output_queue_size);
5282 }
5283}
5284
5285void
tuexen9784e9a2011-12-18 13:04:23 +00005286sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
tuexendd729232011-11-01 23:04:43 +00005287{
5288 /* Copy cum-ack */
5289 uint32_t cum_ack, a_rwnd;
5290
5291 cum_ack = ntohl(cp->cumulative_tsn_ack);
5292 /* Arrange so a_rwnd does NOT change */
5293 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5294
5295 /* Now call the express sack handling */
5296 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5297}
5298
/*
 * Re-scan the ordered input queue of one inbound stream after a
 * FORWARD-TSN has advanced what counts as delivered.  Two passes:
 * first everything with a message id at or before last_mid_delivered
 * is pushed up (or handed to the reassembler), then any consecutive
 * run of ids that has now become in-order is delivered the normal way.
 * NOTE(review): passes SCTP_READ_LOCK_HELD throughout, so the caller
 * is expected to hold the INP read lock (the visible caller,
 * sctp_handle_forward_tsn, does).
 */
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *control, *ncontrol;
	struct sctp_association *asoc;
	uint32_t mid;
	int need_reasm_check = 0;

	asoc = &stcb->asoc;
	/* Remember the pre-call delivery point; restored after reasm check. */
	mid = strmin->last_mid_delivered;
	/*
	 * First deliver anything prior to and including the stream no that
	 * came in.
	 */
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
			/* this is deliverable now */
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* Unfragmented message: unlink from whichever stream queue holds it. */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						      strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
					/* Accounting underflow: panic under INVARIANTS, clamp otherwise. */
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					                  control,
					                  &stcb->sctp_socket->so_rcv,
					                  1, SCTP_READ_LOCK_HELD,
					                  SCTP_SO_NOT_LOCKED);
				}
			} else {
				/* Its a fragmented message */
				if (control->first_frag_seen) {
					/* Make it so this is next to deliver, we restore later */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			/* no more delivery now. */
			break;
		}
	}
	if (need_reasm_check) {
		int ret;
		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
			/* Restore the next to deliver unless we are ahead */
			strmin->last_mid_delivered = mid;
		}
		if (ret == 0) {
			/* Left the front Partial one on */
			return;
		}
		need_reasm_check = 0;
	}
	/*
	 * now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						      strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					                  control,
					                  &stcb->sctp_socket->so_rcv, 1,
					                  SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);

				}
				/* Advance to the next expected message id. */
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* Its a fragmented message */
				if (control->first_frag_seen) {
					/* Make it so this is next to deliver */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			/* Gap in message ids: nothing further is in order. */
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}
5439
Michael Tuexene64d7732016-07-17 15:21:06 +02005440
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005441
tuexendd729232011-11-01 23:04:43 +00005442static void
5443sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5444 struct sctp_association *asoc,
Michael Tuexen00657ac2016-12-07 21:53:26 +01005445 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
tuexendd729232011-11-01 23:04:43 +00005446{
Michael Tuexene5001952016-04-17 19:25:27 +02005447 struct sctp_queued_to_read *control;
5448 struct sctp_stream_in *strm;
tuexendd729232011-11-01 23:04:43 +00005449 struct sctp_tmit_chunk *chk, *nchk;
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005450 int cnt_removed=0;
Michael Tuexen00657ac2016-12-07 21:53:26 +01005451
tuexendd729232011-11-01 23:04:43 +00005452 /*
Michael Tuexene5001952016-04-17 19:25:27 +02005453 * For now large messages held on the stream reasm that are
tuexendd729232011-11-01 23:04:43 +00005454 * complete will be tossed too. We could in theory do more
5455 * work to spin through and stop after dumping one msg aka
5456 * seeing the start of a new msg at the head, and call the
5457 * delivery function... to see if it can be delivered... But
5458 * for now we just dump everything on the queue.
5459 */
Michael Tuexene5001952016-04-17 19:25:27 +02005460 strm = &asoc->strmin[stream];
Michael Tuexen00657ac2016-12-07 21:53:26 +01005461 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
Michael Tuexene5001952016-04-17 19:25:27 +02005462 if (control == NULL) {
5463 /* Not found */
5464 return;
5465 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005466 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
Michael Tuexen93e6e552016-09-22 16:25:12 +02005467 return;
5468 }
Michael Tuexene5001952016-04-17 19:25:27 +02005469 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5470 /* Purge hanging chunks */
Michael Tuexen00657ac2016-12-07 21:53:26 +01005471 if (!asoc->idata_supported && (ordered == 0)) {
5472 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005473 break;
5474 }
5475 }
5476 cnt_removed++;
Michael Tuexene5001952016-04-17 19:25:27 +02005477 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005478 if (asoc->size_on_reasm_queue >= chk->send_size) {
5479 asoc->size_on_reasm_queue -= chk->send_size;
5480 } else {
5481#ifdef INVARIANTS
5482 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5483#else
5484 asoc->size_on_reasm_queue = 0;
5485#endif
5486 }
Michael Tuexene5001952016-04-17 19:25:27 +02005487 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5488 if (chk->data) {
5489 sctp_m_freem(chk->data);
5490 chk->data = NULL;
tuexendd729232011-11-01 23:04:43 +00005491 }
Michael Tuexene5001952016-04-17 19:25:27 +02005492 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5493 }
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005494 if (!TAILQ_EMPTY(&control->reasm)) {
5495 /* This has to be old data, unordered */
5496 if (control->data) {
5497 sctp_m_freem(control->data);
5498 control->data = NULL;
5499 }
5500 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5501 chk = TAILQ_FIRST(&control->reasm);
5502 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5503 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5504 sctp_add_chk_to_control(control, strm, stcb, asoc,
5505 chk, SCTP_READ_LOCK_HELD);
5506 }
5507 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5508 return;
5509 }
5510 if (control->on_strm_q == SCTP_ON_ORDERED) {
5511 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005512 if (asoc->size_on_all_streams >= control->length) {
5513 asoc->size_on_all_streams -= control->length;
5514 } else {
5515#ifdef INVARIANTS
5516 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5517#else
5518 asoc->size_on_all_streams = 0;
5519#endif
5520 }
5521 sctp_ucount_decr(asoc->cnt_on_all_streams);
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005522 control->on_strm_q = 0;
5523 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5524 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5525 control->on_strm_q = 0;
5526#ifdef INVARIANTS
5527 } else if (control->on_strm_q) {
5528 panic("strm: %p ctl: %p unknown %d",
5529 strm, control, control->on_strm_q);
5530#endif
5531 }
5532 control->on_strm_q = 0;
Michael Tuexene5001952016-04-17 19:25:27 +02005533 if (control->on_read_q == 0) {
5534 sctp_free_remote_addr(control->whoFrom);
5535 if (control->data) {
5536 sctp_m_freem(control->data);
5537 control->data = NULL;
tuexendd729232011-11-01 23:04:43 +00005538 }
Michael Tuexene5001952016-04-17 19:25:27 +02005539 sctp_free_a_readq(stcb, control);
tuexendd729232011-11-01 23:04:43 +00005540 }
5541}
5542
tuexendd729232011-11-01 23:04:43 +00005543void
5544sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5545 struct sctp_forward_tsn_chunk *fwd,
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005546 int *abort_flag, struct mbuf *m , int offset)
tuexendd729232011-11-01 23:04:43 +00005547{
5548 /* The pr-sctp fwd tsn */
5549 /*
5550 * here we will perform all the data receiver side steps for
5551 * processing FwdTSN, as required in by pr-sctp draft:
5552 *
5553 * Assume we get FwdTSN(x):
5554 *
Michael Tuexene64d7732016-07-17 15:21:06 +02005555 * 1) update local cumTSN to x
5556 * 2) try to further advance cumTSN to x + others we have
5557 * 3) examine and update re-ordering queue on pr-in-streams
5558 * 4) clean up re-assembly queue
5559 * 5) Send a sack to report where we are.
tuexendd729232011-11-01 23:04:43 +00005560 */
5561 struct sctp_association *asoc;
5562 uint32_t new_cum_tsn, gap;
tuexen9784e9a2011-12-18 13:04:23 +00005563 unsigned int i, fwd_sz, m_size;
tuexendd729232011-11-01 23:04:43 +00005564 uint32_t str_seq;
5565 struct sctp_stream_in *strm;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005566 struct sctp_queued_to_read *control, *sv;
tuexendd729232011-11-01 23:04:43 +00005567
tuexendd729232011-11-01 23:04:43 +00005568 asoc = &stcb->asoc;
5569 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5570 SCTPDBG(SCTP_DEBUG_INDATA1,
5571 "Bad size too small/big fwd-tsn\n");
5572 return;
5573 }
5574 m_size = (stcb->asoc.mapping_array_size << 3);
5575 /*************************************************************/
5576 /* 1. Here we update local cumTSN and shift the bitmap array */
5577 /*************************************************************/
5578 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5579
5580 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5581 /* Already got there ... */
5582 return;
5583 }
5584 /*
5585 * now we know the new TSN is more advanced, let's find the actual
5586 * gap
5587 */
5588 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5589 asoc->cumulative_tsn = new_cum_tsn;
5590 if (gap >= m_size) {
5591 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
t00fcxen08f9ff92014-03-16 13:38:54 +00005592 struct mbuf *op_err;
5593 char msg[SCTP_DIAG_INFO_LEN];
5594
tuexendd729232011-11-01 23:04:43 +00005595 /*
5596 * out of range (of single byte chunks in the rwnd I
5597 * give out). This must be an attacker.
5598 */
5599 *abort_flag = 1;
t00fcxen08f9ff92014-03-16 13:38:54 +00005600 snprintf(msg, sizeof(msg),
5601 "New cum ack %8.8x too high, highest TSN %8.8x",
5602 new_cum_tsn, asoc->highest_tsn_inside_map);
5603 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
Michael Tuexene5001952016-04-17 19:25:27 +02005604 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
t00fcxen08f9ff92014-03-16 13:38:54 +00005605 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
tuexendd729232011-11-01 23:04:43 +00005606 return;
5607 }
5608 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
tuexen15f99d82012-04-19 16:08:38 +00005609
tuexendd729232011-11-01 23:04:43 +00005610 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5611 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5612 asoc->highest_tsn_inside_map = new_cum_tsn;
tuexen15f99d82012-04-19 16:08:38 +00005613
tuexendd729232011-11-01 23:04:43 +00005614 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5615 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
tuexen15f99d82012-04-19 16:08:38 +00005616
tuexendd729232011-11-01 23:04:43 +00005617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5618 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5619 }
5620 } else {
5621 SCTP_TCB_LOCK_ASSERT(stcb);
5622 for (i = 0; i <= gap; i++) {
5623 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5624 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5625 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5626 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5627 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5628 }
5629 }
5630 }
5631 }
5632 /*************************************************************/
5633 /* 2. Clear up re-assembly queue */
5634 /*************************************************************/
tuexendd729232011-11-01 23:04:43 +00005635
Michael Tuexene5001952016-04-17 19:25:27 +02005636 /* This is now done as part of clearing up the stream/seq */
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005637 if (asoc->idata_supported == 0) {
5638 uint16_t sid;
5639 /* Flush all the un-ordered data based on cum-tsn */
5640 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5641 for (sid = 0 ; sid < asoc->streamincnt; sid++) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01005642 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005643 }
5644 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5645 }
tuexendd729232011-11-01 23:04:43 +00005646 /*******************************************************/
5647 /* 3. Update the PR-stream re-ordering queues and fix */
5648 /* delivery issues as needed. */
5649 /*******************************************************/
5650 fwd_sz -= sizeof(*fwd);
5651 if (m && fwd_sz) {
5652 /* New method. */
5653 unsigned int num_str;
Michael Tuexen00657ac2016-12-07 21:53:26 +01005654 uint32_t mid, cur_mid;
5655 uint16_t sid;
Michael Tuexene64d7732016-07-17 15:21:06 +02005656 uint16_t ordered, flags;
tuexendd729232011-11-01 23:04:43 +00005657 struct sctp_strseq *stseq, strseqbuf;
Michael Tuexene5001952016-04-17 19:25:27 +02005658 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
tuexendd729232011-11-01 23:04:43 +00005659 offset += sizeof(*fwd);
5660
5661 SCTP_INP_READ_LOCK(stcb->sctp_ep);
Michael Tuexene5001952016-04-17 19:25:27 +02005662 if (asoc->idata_supported) {
5663 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
Michael Tuexene5001952016-04-17 19:25:27 +02005664 } else {
5665 num_str = fwd_sz / sizeof(struct sctp_strseq);
Michael Tuexene5001952016-04-17 19:25:27 +02005666 }
tuexendd729232011-11-01 23:04:43 +00005667 for (i = 0; i < num_str; i++) {
Michael Tuexene5001952016-04-17 19:25:27 +02005668 if (asoc->idata_supported) {
5669 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5670 sizeof(struct sctp_strseq_mid),
5671 (uint8_t *)&strseqbuf_m);
5672 offset += sizeof(struct sctp_strseq_mid);
5673 if (stseq_m == NULL) {
5674 break;
5675 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005676 sid = ntohs(stseq_m->sid);
5677 mid = ntohl(stseq_m->mid);
Michael Tuexene64d7732016-07-17 15:21:06 +02005678 flags = ntohs(stseq_m->flags);
5679 if (flags & PR_SCTP_UNORDERED_FLAG) {
5680 ordered = 0;
5681 } else {
5682 ordered = 1;
5683 }
Michael Tuexene5001952016-04-17 19:25:27 +02005684 } else {
5685 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5686 sizeof(struct sctp_strseq),
5687 (uint8_t *)&strseqbuf);
5688 offset += sizeof(struct sctp_strseq);
5689 if (stseq == NULL) {
5690 break;
5691 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005692 sid = ntohs(stseq->sid);
5693 mid = (uint32_t)ntohs(stseq->ssn);
Michael Tuexene64d7732016-07-17 15:21:06 +02005694 ordered = 1;
tuexendd729232011-11-01 23:04:43 +00005695 }
5696 /* Convert */
tuexendd729232011-11-01 23:04:43 +00005697
5698 /* now process */
5699
5700 /*
5701 * Ok we now look for the stream/seq on the read queue
5702 * where its not all delivered. If we find it we transmute the
5703 * read entry into a PDI_ABORTED.
5704 */
Michael Tuexen00657ac2016-12-07 21:53:26 +01005705 if (sid >= asoc->streamincnt) {
tuexendd729232011-11-01 23:04:43 +00005706 /* screwed up streams, stop! */
5707 break;
5708 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005709 if ((asoc->str_of_pdapi == sid) &&
5710 (asoc->ssn_of_pdapi == mid)) {
tuexendd729232011-11-01 23:04:43 +00005711 /* If this is the one we were partially delivering
5712 * now then we no longer are. Note this will change
5713 * with the reassembly re-write.
5714 */
5715 asoc->fragmented_delivery_inprogress = 0;
5716 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005717 strm = &asoc->strmin[sid];
5718 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5719 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
Michael Tuexenfdcf7902016-08-06 14:39:31 +02005720 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005721 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5722 if ((control->sinfo_stream == sid) &&
5723 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
Michael Tuexen00657ac2016-12-07 21:53:26 +01005724 str_seq = (sid << 16) | (0x0000ffff & mid);
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005725 control->pdapi_aborted = 1;
tuexendd729232011-11-01 23:04:43 +00005726 sv = stcb->asoc.control_pdapi;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005727 control->end_added = 1;
5728 if (control->on_strm_q == SCTP_ON_ORDERED) {
5729 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5730 if (asoc->size_on_all_streams >= control->length) {
5731 asoc->size_on_all_streams -= control->length;
5732 } else {
Michael Tuexenf6d20c52016-04-18 11:31:05 +02005733#ifdef INVARIANTS
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005734 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5735#else
5736 asoc->size_on_all_streams = 0;
5737#endif
5738 }
5739 sctp_ucount_decr(asoc->cnt_on_all_streams);
5740 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5741 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5742#ifdef INVARIANTS
5743 } else if (control->on_strm_q) {
Michael Tuexeneccb4be2016-04-18 08:58:59 +02005744 panic("strm: %p ctl: %p unknown %d",
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005745 strm, control, control->on_strm_q);
Michael Tuexenf6d20c52016-04-18 11:31:05 +02005746#endif
Michael Tuexene5001952016-04-17 19:25:27 +02005747 }
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005748 control->on_strm_q = 0;
5749 stcb->asoc.control_pdapi = control;
tuexendd729232011-11-01 23:04:43 +00005750 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5751 stcb,
5752 SCTP_PARTIAL_DELIVERY_ABORTED,
5753 (void *)&str_seq,
5754 SCTP_SO_NOT_LOCKED);
5755 stcb->asoc.control_pdapi = sv;
5756 break;
Michael Tuexenbe5e3e72017-07-19 14:44:48 +02005757 } else if ((control->sinfo_stream == sid) &&
5758 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
tuexendd729232011-11-01 23:04:43 +00005759 /* We are past our victim SSN */
5760 break;
5761 }
5762 }
Michael Tuexen00657ac2016-12-07 21:53:26 +01005763 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
tuexendd729232011-11-01 23:04:43 +00005764 /* Update the sequence number */
Michael Tuexen00657ac2016-12-07 21:53:26 +01005765 strm->last_mid_delivered = mid;
tuexendd729232011-11-01 23:04:43 +00005766 }
5767 /* now kick the stream the new way */
Michael Tuexene5001952016-04-17 19:25:27 +02005768 /*sa_ignore NO_NULL_CHK*/
tuexendd729232011-11-01 23:04:43 +00005769 sctp_kick_prsctp_reorder_queue(stcb, strm);
5770 }
5771 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5772 }
5773 /*
5774 * Now slide thing forward.
5775 */
5776 sctp_slide_mapping_arrays(stcb);
tuexendd729232011-11-01 23:04:43 +00005777}