// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_ism.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	HZ
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space available
 * or urgent byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = timeo ? false : true;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

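/* check whether TCP_CORK is set on the internal TCP socket of our clcsock;
 * SMC defers its RDMA writes while the socket is corked
 */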
static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc) {
				if (send_done)
					return send_done;
				goto out_err;
			}
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_write(&prep,
			       smc_curs_read(&conn->tx_curs_prep, conn),
			       conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
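		/* copy in at most two pieces: the 1st chunk runs up to the
		 * end of the ring buffer, a 2nd chunk (wrapped case only)
		 * continues at offset 0
		 */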
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_write(&conn->tx_curs_prep,
			       smc_curs_read(&prep, conn),
			       conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
						(conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
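/* (pos.token and pos.index address the peer's memory region; conn->tx_off
 * locates this connection's RMBE within it, cf. the "RMBE within RMB"
 * offset used for SMC-R below)
 */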
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
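/* (a failed ib_post_send() is treated as fatal: the connection is flagged
 * as aborted and the whole link group is terminated)
 */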
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_send_wr *failed_wr = NULL;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
	if (rc) {
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		smc_lgr_terminate(lgr);
	}
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}

/* SMC-R helper for smc_tx_rdma_writes() */
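/* (builds at most two destination chunks to handle RMBE wrap-around, each
 * gathered from at most two source chunks of the possibly wrapping sndbuf
 * via the SGE list of a single RDMA write)
 */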
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int src_len_sum = src_len, dst_len_sum = dst_len;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
			/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}

/* SMC-D helper for smc_tx_rdma_writes() */
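/* (same chunking scheme as the SMC-R variant, but every source chunk is
 * written separately with smcd_tx_ism_write(); dst_off is shifted by
 * sizeof(struct smcd_cdc_msg) since the beginning of the peer buffer
 * holds the CDC message)
 */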
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
			/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
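/* (to_send is the cursor distance between prepared and sent data, clamped
 * by peer_rmbe_space, which is reduced by data in flight and replenished
 * by the peer's CDC consumer cursor updates; source and destination may
 * each wrap, so up to two chunks per side are set up before dispatching
 * to the SMC-R or SMC-D helper)
 */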
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_write(&prod,
		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_write(&conn->local_tx_ctrl.prod,
		       smc_curs_read(&prod, conn),
		       conn);
	/* dst: peer RMBE */
	smc_curs_write(&conn->tx_curs_sent,
		       smc_curs_read(&sent, conn),
		       conn);
	/* src: local sndbuf */

	return 0;
}

/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED) {
				rc = sock_error(&smc->sk);
				goto out_unlock;
			}
			rc = 0;
			if (conn->alert_token_local) /* connection healthy */
				mod_delayed_work(system_wq, &conn->tx_work,
						 SMC_TX_WORK_DELAY);
		}
		goto out_unlock;
	}

	if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
		rc = smc_tx_rdma_writes(conn);
		if (rc) {
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	pflags = &conn->local_tx_ctrl.prod_flags;
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

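/* SMC-D variant of smc_tx_sndbuf_nonempty(): no send work request slot is
 * needed, data is moved with ISM writes and the CDC message is sent
 * through smcd_cdc_msg_send()
 */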
static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	return rc;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err ||
	    !conn->alert_token_local ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}

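/* send a consumer cursor update to the peer if required:
 * the peer derives its usable send window from our consumer cursor, so an
 * update is sent once enough bytes were consumed (rmbe_update_limit) and
 * the peer's known free space runs low, when the peer asked for one, or
 * when forced by the caller
 */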
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_curs_write(&cfed,
		       smc_curs_read(&conn->rx_curs_confirmed, conn),
		       conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_write(&prod,
			       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
			       conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    conn->alert_token_local) { /* connection healthy */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}