/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>
#include <net/addrconf.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
					  const struct in6_addr *faddr)
{
	static u32 rds6_hash_secret __read_mostly;
	static u32 rds_hash_secret __read_mostly;

	u32 lhash, fhash, hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
	net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));

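	/* Mix the low 32 bits of the local address (the IPv4 address for
	 * v4-mapped addresses) with a jhash of the full peer address,
	 * reusing the inet6 established-connection hash function.
	 */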
	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
	hash = __inet6_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);

	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      const struct in6_addr *laddr,
					      const struct in6_addr *faddr,
					      struct rds_transport *trans,
					      int dev_if)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
		    ipv6_addr_equal(&conn->c_laddr, laddr) &&
		    conn->c_trans == trans &&
		    net == rds_conn_net(conn) &&
		    conn->c_dev_if == dev_if) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
		 laddr, faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future. It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI6c to %pI6c reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	cp->cp_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time. They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created. They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp,
						int is_outgoing,
						int dev_if)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;
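	/* A multipath-capable transport carries RDS_MPATH_WORKERS independent
	 * paths per connection; all other transports use a single path.
	 */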
	int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
	if (conn &&
	    conn->c_loopback &&
	    conn->c_trans != &rds_loop_transport &&
	    ipv6_addr_equal(laddr, faddr) &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
	if (!conn->c_path) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = *laddr;
	conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
	conn->c_faddr = *faddr;
	conn->c_dev_if = dev_if;
	/* If the local address is link local, set c_bound_if to be the
	 * index used for this connection. Otherwise, set it to 0 as
	 * the socket is not bound to an interface. c_bound_if is used
	 * to look up a socket when a packet is received.
	 */
	if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL)
		conn->c_bound_if = dev_if;
	else
		conn->c_bound_if = 0;

	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback. If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	init_waitqueue_head(&conn->c_hs_waitq);
	for (i = 0; i < npaths; i++) {
		__rds_conn_path_init(conn, &conn->c_path[i],
				     is_outgoing);
		conn->c_path[i].cp_index = i;
	}
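	/* Hold the RCU read lock across conn_alloc so a pending netns or
	 * transport teardown (rds_destroy_pending()) cannot race with the
	 * allocation; GFP_ATOMIC because we cannot sleep under the lock.
	 */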
	rcu_read_lock();
	if (rds_destroy_pending(conn))
		ret = -ENETDOWN;
	else
		ret = trans->conn_alloc(conn, GFP_ATOMIC);
	if (ret) {
		rcu_read_unlock();
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
		 conn, laddr, faddr,
		 strnlen(trans->t_name, sizeof(trans->t_name)) ?
		 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans,
					dev_if);
		if (found) {
			struct rds_conn_path *cp;
			int i;

			for (i = 0; i < npaths; i++) {
				cp = &conn->c_path[i];
				/* The ->conn_alloc invocation may have
				 * allocated resource for all paths, so all
				 * of them may have to be freed here.
				 */
				if (cp->cp_transport_data)
					trans->conn_free(cp->cp_transport_data);
			}
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			conn->c_my_gen_num = rds_gen_num;
			conn->c_peer_gen_num = 0;
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);
	rcu_read_unlock();

out:
	return conn;
}

struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);

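		/* Wait for the transmit path (RDS_IN_XMIT) and the receive
		 * refill path (RDS_RECV_REFILL) to quiesce before asking the
		 * transport to shut the path down.
		 */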
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);

		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 *
			 * Note that this also happens with rds-tcp because
			 * we could have triggered rds_conn_path_drop in irq
			 * mode from rds_tcp_state change on the receipt of
			 * a FIN, thus we need to recheck for RDS_CONN_ERROR
			 * here.
			 */
			rds_conn_path_error(cp, "%s: failed to transition "
					    "to state DOWN, current state "
					    "is %d\n", __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}

/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
	struct rds_message *rm, *rtmp;

	if (!cp->cp_transport_data)
		return;

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);

	rds_conn_path_drop(cp, true);
	flush_work(&cp->cp_down_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &cp->cp_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (cp->cp_xmit_rm)
		rds_message_put(cp->cp_xmit_rm);

	WARN_ON(delayed_work_pending(&cp->cp_send_w));
	WARN_ON(delayed_work_pending(&cp->cp_recv_w));
	WARN_ON(delayed_work_pending(&cp->cp_conn_w));
	WARN_ON(work_pending(&cp->cp_down_w));

	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances. It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	unsigned long flags;
	int i;
	struct rds_conn_path *cp;
	int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rdsdebug("freeing conn %p for %pI6c -> %pI6c\n",
		 conn, &conn->c_laddr, &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
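	/* Wait for any RCU readers still walking the hash chain to finish
	 * before the paths are torn down.
	 */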
	synchronize_rcu();

	/* shut the connection down */
	for (i = 0; i < npaths; i++) {
		cp = &conn->c_path[i];
		rds_conn_path_destroy(cp);
		BUG_ON(!list_empty(&cp->cp_retrans));
	}

	/*
	 * The congestion maps aren't freed up here. They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	kfree(conn->c_path);
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

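/* Copy one queued message's info record out through the info iterator,
 * using the IPv4 (struct rds_info_message) or IPv6 (struct
 * rds6_info_message) layout depending on which family is being reported.
 */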
static void __rds_inc_msg_cp(struct rds_incoming *inc,
			     struct rds_info_iterator *iter,
			     void *saddr, void *daddr, int flip, bool isv6)
{
	if (isv6)
		rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
	else
		rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
				  *(__be32 *)daddr, flip);
}

static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len,
				      struct rds_info_iterator *iter,
				      struct rds_info_lengths *lens,
				      int want_send, bool isv6)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	if (isv6)
		len /= sizeof(struct rds6_info_message);
	else
		len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			if (!isv6 && conn->c_isv6)
				continue;

			npaths = (conn->c_trans->t_mp_capable ?
				  RDS_MPATH_WORKERS : 1);

			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						__rds_inc_msg_cp(&rm->m_inc,
								 iter,
								 &conn->c_laddr,
								 &conn->c_faddr,
								 0, isv6);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	if (isv6)
		lens->each = sizeof(struct rds6_info_message);
	else
		lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
}

static void rds6_conn_message_info(struct socket *sock, unsigned int len,
				   struct rds_info_iterator *iter,
				   struct rds_info_lengths *lens,
				   int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds6_conn_message_info_send(struct socket *sock, unsigned int len,
					struct rds_info_iterator *iter,
					struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

static void rds6_conn_message_info_retrans(struct socket *sock,
					   unsigned int len,
					   struct rds_info_iterator *iter,
					   struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

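/* Like rds_for_each_conn_info(), but the visitor inspects a single
 * rds_conn_path rather than the whole connection.
 */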
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
				    struct rds_info_iterator *iter,
				    struct rds_info_lengths *lens,
				    int (*visitor)(struct rds_conn_path *, void *),
				    u64 *buffer,
				    size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			/* XXX We only copy the information from the first
			 * path for now. The problem is that if there is
			 * more than one underlying path, we cannot report
			 * information on all of them using the existing
			 * API. For example, there is only one next_tx_seq:
			 * which path's next_tx_seq should we report? It is
			 * a bug in the design of MPRDS.
			 */
			cp = conn->c_path;

			/* XXX no cp_lock usage.. */
			if (!visitor(cp, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}

static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;
	struct rds_connection *conn = cp->cp_conn;

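	/* IPv6 connections are reported by rds6_conn_info_visitor() instead. */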
	if (conn->c_isv6)
		return 0;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = conn->c_laddr.s6_addr32[3];
	cinfo->faddr = conn->c_faddr.s6_addr32[3];
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds6_info_connection *cinfo6 = buffer;
	struct rds_connection *conn = cp->cp_conn;

	cinfo6->next_tx_seq = cp->cp_next_tx_seq;
	cinfo6->next_rx_seq = cp->cp_next_rx_seq;
	cinfo6->laddr = conn->c_laddr;
	cinfo6->faddr = conn->c_faddr;
	strncpy(cinfo6->transport, conn->c_trans->t_name,
		sizeof(cinfo6->transport));
	cinfo6->flags = 0;

	rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	/* Just return 1 as there is no error case. This is a helper function
	 * for rds_walk_conn_path_info() and it wants a return value.
	 */
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
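	/* On-stack scratch record for one info entry, rounded up to whole
	 * u64 slots so the copy stays aligned.
	 */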
	u64 buffer[(sizeof(struct rds_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				buffer,
				sizeof(struct rds_info_connection));
}

static void rds6_conn_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	u64 buffer[(sizeof(struct rds6_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds6_conn_info_visitor,
				buffer,
				sizeof(struct rds6_info_connection));
}

int rds_conn_init(void)
{
	int ret;

	ret = rds_loop_net_init(); /* register pernet callback */
	if (ret)
		return ret;

	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab) {
		rds_loop_net_exit();
		return -ENOMEM;
	}

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);
	rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_register_func(RDS6_INFO_SEND_MESSAGES,
			       rds6_conn_message_info_send);
	rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES,
			       rds6_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_net_exit(); /* unregister pernet callback */
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
	rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES,
				 rds6_conn_message_info_send);
	rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES,
				 rds6_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);

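	/* Don't queue new shutdown work if the owning netns or transport is
	 * already going away, unless this drop is part of connection destroy
	 * itself.
	 */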
	rcu_read_lock();
	if (!destroy && rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	queue_work(rds_wq, &cp->cp_down_w);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

void rds_conn_drop(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	rcu_read_lock();
	if (rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);

void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp, false);
}