/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

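/*
 * Lifecycle note: connections are refcounted through conn->usage and kept on
 * the global rxrpc_connections list.  rxrpc_put_connection() stamps
 * conn->put_time when a reference is dropped and, on the final put, kicks the
 * delayed reaper work item, which frees connections that have been unused for
 * longer than rxrpc_connection_expiry seconds.
 */
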
/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		spin_lock_init(&conn->channel_lock);
		init_waitqueue_head(&conn->channel_wq);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->link);
		conn->calls = RB_ROOT;
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		rwlock_init(&conn->lock);
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	__be32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}

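/*
 * Note on the call-by-ID tree above: the IDs added there are allocated from
 * conn->call_counter in rxrpc_connect_call(), so two calls on the same
 * connection should never share an ID - hence the BUG() on a duplicate.
 */
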
/*
 * Allocate a client connection.  The caller must take care to clear any
 * padding bytes in *cp.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	conn->params = *cp;
	conn->proto.local = cp->local;
	conn->proto.epoch = rxrpc_epoch;
	conn->proto.cid = 0;
	conn->proto.in_clientflag = 0;
	conn->proto.family = cp->peer->srx.transport.family;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;

	switch (conn->proto.family) {
	case AF_INET:
		conn->proto.addr_size = sizeof(conn->proto.ipv4_addr);
		conn->proto.ipv4_addr = cp->peer->srx.transport.sin.sin_addr;
		conn->proto.port = cp->peer->srx.transport.sin.sin_port;
		break;
	}

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	conn->security->prime_packet_security(conn);

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	_leave(" = %p", conn);
	return conn;

error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

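/*
 * Client connection sharing: non-exclusive client connections are indexed in
 * the local endpoint's client_conns rbtree, keyed on the {peer, key,
 * security_level} tuple from the connection parameters, and each can carry up
 * to RXRPC_MAXCALLS simultaneous calls, one per channel.  Exclusive
 * connections carry a single call and are never added to that tree.
 */
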
/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int chan;

	DECLARE_WAITQUEUE(myself, current);

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		return -ENOMEM;

	if (!cp->exclusive) {
		/* Search for an existing client connection unless this is
		 * going to be a connection that's used exclusively for a
		 * single call.
		 */
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level));
			if (diff < 0)
				p = p->rb_left;
			else if (diff > 0)
				p = p->rb_right;
			else
				goto found_extant_conn;
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* We didn't find a connection or we want an exclusive one. */
	_debug("get new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return PTR_ERR(candidate);
	}

	if (cp->exclusive) {
		/* Assign the call on an exclusive connection to channel 0 and
		 * don't add the connection to the endpoint's shareable conn
		 * lookup tree.
		 */
		_debug("exclusive chan 0");
		conn = candidate;
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
		spin_lock(&conn->channel_lock);
		chan = 0;
		goto found_channel;
	}

	/* We need to redo the search before attempting to add a new connection
	 * lest we race with someone else adding a conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level));
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* The second search also failed; simply add the new connection with
	 * the new call in channel 0.  Note that we need to take the channel
	 * lock before dropping the client conn lock.
	 */
	_debug("new conn");
	conn = candidate;
	candidate = NULL;

	rb_link_node(&conn->client_node, parent, pp);
	rb_insert_color(&conn->client_node, &local->client_conns);

	atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
	spin_lock(&conn->channel_lock);
	spin_unlock(&local->client_conns_lock);
	chan = 0;

found_channel:
	_debug("found chan");
	call->conn = conn;
	call->channel = chan;
	call->epoch = conn->proto.epoch;
	call->cid = conn->proto.cid | chan;
	call->call_id = ++conn->call_counter;
	rcu_assign_pointer(conn->channels[chan], call);

	_net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);

	rxrpc_add_call_ID_to_conn(conn, call);
	spin_unlock(&conn->channel_lock);
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return 0;

	/* We found a suitable connection already in existence.  Discard any
	 * candidate we may have allocated, and try to get a channel on this
	 * one.
	 */
found_extant_conn:
	_debug("found conn");
	rxrpc_get_connection(conn);
	spin_unlock(&local->client_conns_lock);

	rxrpc_put_connection(candidate);

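	/* Try to reserve one of the connection's RXRPC_MAXCALLS call channels
	 * by decrementing avail_chans, but never below zero.  If no channel
	 * is free, a non-blocking caller bails out with -EAGAIN; otherwise we
	 * sleep on channel_wq until rxrpc_disconnect_call() releases a
	 * channel and wakes us, or until a signal interrupts the wait.
	 */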
	if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
		if (!gfpflags_allow_blocking(gfp)) {
			rxrpc_put_connection(conn);
			_leave(" = -EAGAIN");
			return -EAGAIN;
		}

		add_wait_queue(&conn->channel_wq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (atomic_add_unless(&conn->avail_chans, -1, 0))
				break;
			if (signal_pending(current))
				goto interrupted;
			schedule();
		}
		remove_wait_queue(&conn->channel_wq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* The connection allegedly now has a free channel and we can now
	 * attach the call to it.
	 */
	spin_lock(&conn->channel_lock);

	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	BUG();

interrupted:
	remove_wait_queue(&conn->channel_wq, &myself);
	__set_current_state(TASK_RUNNING);
	rxrpc_put_connection(conn);
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}

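/*
 * Service (incoming) connections, handled below, are indexed differently from
 * client connections: they live in the owning peer's service_conns rbtree,
 * keyed on {epoch, connection ID}, rather than in the local endpoint's
 * client_conns tree or the rxrpc_client_conn_ids IDR.
 */
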
/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
						   struct rxrpc_peer *peer,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 cid;

	_enter("");

	ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);

	epoch = sp->hdr.epoch;
	cid = sp->hdr.cid & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&peer->conn_lock);

	p = peer->service_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		_debug("maybe %x", conn->proto.cid);

		if (epoch < conn->proto.epoch)
			p = p->rb_left;
		else if (epoch > conn->proto.epoch)
			p = p->rb_right;
		else if (cid < conn->proto.cid)
			p = p->rb_left;
		else if (cid > conn->proto.cid)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&peer->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(GFP_NOIO);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->proto.local = local;
	candidate->proto.epoch = sp->hdr.epoch;
	candidate->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
	candidate->proto.in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->params.local = local;
	candidate->params.peer = peer;
	candidate->params.service_id = sp->hdr.serviceId;
	candidate->security_ix = sp->hdr.securityIndex;
	candidate->out_clientflag = 0;
	candidate->state = RXRPC_CONN_SERVER;
	if (candidate->params.service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&peer->conn_lock);

	pp = &peer->service_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		if (epoch < conn->proto.epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->proto.epoch)
			pp = &(*pp)->rb_right;
		else if (cid < conn->proto.cid)
			pp = &(*pp)->rb_left;
		else if (cid > conn->proto.cid)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->service_node, p, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
	rxrpc_get_peer(peer);
	rxrpc_get_local(local);

	write_unlock_bh(&peer->conn_lock);

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (sp->hdr.securityIndex != conn->security_ix) {
		read_unlock_bh(&peer->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	read_unlock_bh(&peer->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (sp->hdr.securityIndex != conn->security_ix) {
		write_unlock_bh(&peer->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	write_unlock_bh(&peer->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}

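/*
 * Note on lookup direction: packets flagged RXRPC_CLIENT_INITIATED came from
 * a remote client and so are matched against our service connections in the
 * peer's tree; other packets are replies to our own client connections and
 * are resolved through the rxrpc_client_conn_ids IDR using the connection ID.
 */
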
/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
					       struct rxrpc_peer *peer,
					       struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	u32 epoch, cid;

	_enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);

	read_lock_bh(&peer->conn_lock);

	cid = sp->hdr.cid & RXRPC_CIDMASK;
	epoch = sp->hdr.epoch;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		p = peer->service_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			_debug("maybe %x", conn->proto.cid);

			if (epoch < conn->proto.epoch)
				p = p->rb_left;
			else if (epoch > conn->proto.epoch)
				p = p->rb_right;
			else if (cid < conn->proto.cid)
				p = p->rb_left;
			else if (cid > conn->proto.cid)
				p = p->rb_right;
			else
				goto found;
		}
	} else {
		conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
		if (conn && conn->proto.epoch == epoch)
			goto found;
	}

	read_unlock_bh(&peer->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	rxrpc_get_connection(conn);
	read_unlock_bh(&peer->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	unsigned chan = call->channel;

	_enter("%d,%d", conn->debug_id, call->channel);

	if (conn->channels[chan] == call) {
		rcu_assign_pointer(conn->channels[chan], NULL);
		atomic_inc(&conn->avail_chans);
		wake_up(&conn->channel_wq);
	}
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

	conn->put_time = ktime_get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

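/*
 * The reaper below runs as delayed work.  Under the global connection lock it
 * walks rxrpc_connections, moves connections whose usage count is zero and
 * whose expiry time has passed onto a local graveyard list (removing them
 * from their lookup trees), remembers the earliest future expiry so it can
 * reschedule itself, and then destroys the graveyard entries outside the
 * lock.
 */
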
/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_peer *peer;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		if (rxrpc_conn_is_client(conn)) {
			struct rxrpc_local *local = conn->params.local;
			spin_lock(&local->client_conns_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rxrpc_put_client_connection_id(conn);
				rb_erase(&conn->client_node,
					 &local->client_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			spin_unlock(&local->client_conns_lock);
		} else {
			peer = conn->params.peer;
			write_lock_bh(&peer->conn_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rb_erase(&conn->service_node,
					 &peer->service_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			write_unlock_bh(&peer->conn_lock);
		}
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}