blob: 7933fb92d8527e527302d6161bf984827ddcc1cd [file] [log] [blame]
Ying Xuec5fa7b32013-06-17 10:54:39 -04001/*
2 * net/tipc/server.c: TIPC server infrastructure
3 *
4 * Copyright (c) 2012-2013, Wind River Systems
Jon Maloydf79d042018-02-15 10:40:44 +01005 * Copyright (c) 2017, Ericsson AB
Ying Xuec5fa7b32013-06-17 10:54:39 -04006 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
Jon Maloyc901d262018-02-15 10:40:43 +010037#include "subscr.h"
Ying Xuec5fa7b32013-06-17 10:54:39 -040038#include "server.h"
39#include "core.h"
Ying Xue859fc7c2015-01-09 15:27:01 +080040#include "socket.h"
Jon Maloy14c04492017-10-13 11:04:17 +020041#include "addr.h"
42#include "msg.h"
Ying Xuec5fa7b32013-06-17 10:54:39 -040043#include <net/sock.h>
Ying Xue76100a82015-03-18 09:32:57 +080044#include <linux/module.h>
Ying Xuec5fa7b32013-06-17 10:54:39 -040045
46/* Number of messages to send before rescheduling */
47#define MAX_SEND_MSG_COUNT 25
48#define MAX_RECV_MSG_COUNT 25
49#define CF_CONNECTED 1
Ying Xue76100a82015-03-18 09:32:57 +080050#define CF_SERVER 2
Ying Xuec5fa7b32013-06-17 10:54:39 -040051
52#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
53
/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handler associated with connection
 * @flags: indicates connection state (CF_CONNECTED / CF_SERVER)
 * @server: pointer to connected server
 * @sub_list: list of all pertaining subscriptions
 * @sub_lock: lock protecting the subscription list
 * @rwork: receive work item
 * @rx_action: what to do when connection socket is active
 * @outqueue: pointer to first outbound message in queue
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_server *server;
	struct list_head sub_list;
	spinlock_t sub_lock; /* for subscription list */
	struct work_struct rwork;
	int (*rx_action) (struct tipc_conn *con);
	struct list_head outqueue;
	spinlock_t outqueue_lock;
	struct work_struct swork;
};
84
/* An entry waiting to be sent */
struct outqueue_entry {
	bool inactive;		/* subscription timed out; delete it on send */
	struct tipc_event evt;	/* event copied at queue time */
	struct list_head list;	/* link in tipc_conn::outqueue */
};
91
/* Work-queue handlers and cleanup helper, defined below */
static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);
95
Jon Maloydf79d042018-02-15 10:40:44 +010096static bool connected(struct tipc_conn *con)
97{
98 return con && test_bit(CF_CONNECTED, &con->flags);
99}
100
101/**
102 * htohl - convert value to endianness used by destination
103 * @in: value to convert
104 * @swap: non-zero if endianness must be reversed
105 *
106 * Returns converted value
107 */
108static u32 htohl(u32 in, int swap)
109{
110 return swap ? swab32(in) : in;
111}
112
Ying Xuec5fa7b32013-06-17 10:54:39 -0400113static void tipc_conn_kref_release(struct kref *kref)
114{
115 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
Parthasarathy Bhuvaraganfc0adfc2017-01-24 13:00:45 +0100116 struct tipc_server *s = con->server;
Ying Xue76100a82015-03-18 09:32:57 +0800117 struct socket *sock = con->sock;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400118
Ying Xue76100a82015-03-18 09:32:57 +0800119 if (sock) {
Ying Xue76100a82015-03-18 09:32:57 +0800120 if (test_bit(CF_SERVER, &con->flags)) {
121 __module_get(sock->ops->owner);
Jon Maloydf79d042018-02-15 10:40:44 +0100122 __module_get(sock->sk->sk_prot_creator->owner);
Ying Xue76100a82015-03-18 09:32:57 +0800123 }
Ying Xuedef81f62015-04-23 09:37:38 -0400124 sock_release(sock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400125 con->sock = NULL;
126 }
Jon Maloy14c04492017-10-13 11:04:17 +0200127 spin_lock_bh(&s->idr_lock);
128 idr_remove(&s->conn_idr, con->conid);
129 s->idr_in_use--;
130 spin_unlock_bh(&s->idr_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400131 tipc_clean_outqueues(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400132 kfree(con);
133}
134
/* conn_put - drop one reference; frees the connection on the last put */
static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}
139
/* conn_get - take an additional reference on the connection */
static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}
144
145static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
146{
147 struct tipc_conn *con;
148
149 spin_lock_bh(&s->idr_lock);
150 con = idr_find(&s->conn_idr, conid);
Jon Maloydf79d042018-02-15 10:40:44 +0100151 if (!connected(con) || !kref_get_unless_zero(&con->kref))
152 con = NULL;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400153 spin_unlock_bh(&s->idr_lock);
154 return con;
155}
156
Jon Maloy414574a2018-02-15 10:40:45 +0100157/* sock_data_ready - interrupt callback indicating the socket has data to read
158 * The queued job is launched in tipc_recv_from_sock()
159 */
David S. Miller676d2362014-04-11 16:15:36 -0400160static void sock_data_ready(struct sock *sk)
Ying Xuec5fa7b32013-06-17 10:54:39 -0400161{
162 struct tipc_conn *con;
163
Eric Dumazetb91083a2016-05-17 17:44:09 -0700164 read_lock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400165 con = sock2con(sk);
Jon Maloydf79d042018-02-15 10:40:44 +0100166 if (connected(con)) {
Ying Xuec5fa7b32013-06-17 10:54:39 -0400167 conn_get(con);
168 if (!queue_work(con->server->rcv_wq, &con->rwork))
169 conn_put(con);
170 }
Eric Dumazetb91083a2016-05-17 17:44:09 -0700171 read_unlock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400172}
173
Jon Maloy414574a2018-02-15 10:40:45 +0100174/* sock_write_space - interrupt callback after a sendmsg EAGAIN
175 * Indicates that there now is more is space in the send buffer
176 * The queued job is launched in tipc_send_to_sock()
177 */
Ying Xuec5fa7b32013-06-17 10:54:39 -0400178static void sock_write_space(struct sock *sk)
179{
180 struct tipc_conn *con;
181
Eric Dumazetb91083a2016-05-17 17:44:09 -0700182 read_lock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400183 con = sock2con(sk);
Jon Maloydf79d042018-02-15 10:40:44 +0100184 if (connected(con)) {
Ying Xuec5fa7b32013-06-17 10:54:39 -0400185 conn_get(con);
186 if (!queue_work(con->server->send_wq, &con->swork))
187 conn_put(con);
188 }
Eric Dumazetb91083a2016-05-17 17:44:09 -0700189 read_unlock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400190}
191
192static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
193{
194 struct sock *sk = sock->sk;
195
196 write_lock_bh(&sk->sk_callback_lock);
197
198 sk->sk_data_ready = sock_data_ready;
199 sk->sk_write_space = sock_write_space;
200 sk->sk_user_data = con;
201
202 con->sock = sock;
203
204 write_unlock_bh(&sk->sk_callback_lock);
205}
206
Jon Maloydf79d042018-02-15 10:40:44 +0100207/* tipc_con_delete_sub - delete a specific or all subscriptions
208 * for a given subscriber
209 */
210static void tipc_con_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
211{
212 struct list_head *sub_list = &con->sub_list;
213 struct tipc_subscription *sub, *tmp;
214
215 spin_lock_bh(&con->sub_lock);
216 list_for_each_entry_safe(sub, tmp, sub_list, subscrp_list) {
217 if (!s || !memcmp(s, &sub->evt.s, sizeof(*s)))
218 tipc_sub_delete(sub);
219 else if (s)
220 break;
221 }
222 spin_unlock_bh(&con->sub_lock);
223}
224
Parthasarathy Bhuvaragan9dc3abd2017-01-24 13:00:46 +0100225static void tipc_close_conn(struct tipc_conn *con)
Parthasarathy Bhuvaragan333f7962016-04-12 13:05:21 +0200226{
Jon Maloye88f2be2018-01-15 17:56:28 +0100227 struct sock *sk = con->sock->sk;
228 bool disconnect = false;
Parthasarathy Bhuvaragan333f7962016-04-12 13:05:21 +0200229
Jon Maloye88f2be2018-01-15 17:56:28 +0100230 write_lock_bh(&sk->sk_callback_lock);
231 disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
Jon Maloydf79d042018-02-15 10:40:44 +0100232
Jon Maloye88f2be2018-01-15 17:56:28 +0100233 if (disconnect) {
234 sk->sk_user_data = NULL;
Parthasarathy Bhuvaragan9dc3abd2017-01-24 13:00:46 +0100235 if (con->conid)
Jon Maloydf79d042018-02-15 10:40:44 +0100236 tipc_con_delete_sub(con, NULL);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400237 }
Jon Maloye88f2be2018-01-15 17:56:28 +0100238 write_unlock_bh(&sk->sk_callback_lock);
239
240 /* Handle concurrent calls from sending and receiving threads */
241 if (!disconnect)
242 return;
243
244 /* Don't flush pending works, -just let them expire */
245 kernel_sock_shutdown(con->sock, SHUT_RDWR);
246 conn_put(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400247}
248
249static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
250{
251 struct tipc_conn *con;
252 int ret;
253
254 con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
255 if (!con)
256 return ERR_PTR(-ENOMEM);
257
258 kref_init(&con->kref);
259 INIT_LIST_HEAD(&con->outqueue);
Jon Maloydf79d042018-02-15 10:40:44 +0100260 INIT_LIST_HEAD(&con->sub_list);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400261 spin_lock_init(&con->outqueue_lock);
Jon Maloydf79d042018-02-15 10:40:44 +0100262 spin_lock_init(&con->sub_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400263 INIT_WORK(&con->swork, tipc_send_work);
264 INIT_WORK(&con->rwork, tipc_recv_work);
265
266 spin_lock_bh(&s->idr_lock);
267 ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
268 if (ret < 0) {
269 kfree(con);
270 spin_unlock_bh(&s->idr_lock);
271 return ERR_PTR(-ENOMEM);
272 }
273 con->conid = ret;
274 s->idr_in_use++;
275 spin_unlock_bh(&s->idr_lock);
276
277 set_bit(CF_CONNECTED, &con->flags);
278 con->server = s;
279
280 return con;
281}
282
Jon Maloy414574a2018-02-15 10:40:45 +0100283static int tipc_con_rcv_sub(struct tipc_server *srv,
284 struct tipc_conn *con,
285 struct tipc_subscr *s)
Jon Maloydf79d042018-02-15 10:40:44 +0100286{
Jon Maloydf79d042018-02-15 10:40:44 +0100287 struct tipc_subscription *sub;
288 bool status;
289 int swap;
290
291 /* Determine subscriber's endianness */
292 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE |
293 TIPC_SUB_CANCEL));
294
295 /* Detect & process a subscription cancellation request */
296 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
297 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
298 tipc_con_delete_sub(con, s);
299 return 0;
300 }
301 status = !(s->filter & htohl(TIPC_SUB_NO_STATUS, swap));
Jon Maloy414574a2018-02-15 10:40:45 +0100302 sub = tipc_subscrp_subscribe(srv, s, con->conid, swap, status);
Jon Maloydf79d042018-02-15 10:40:44 +0100303 if (!sub)
304 return -1;
305
306 spin_lock_bh(&con->sub_lock);
307 list_add(&sub->subscrp_list, &con->sub_list);
308 spin_unlock_bh(&con->sub_lock);
309 return 0;
310}
311
Ying Xuec5fa7b32013-06-17 10:54:39 -0400312static int tipc_receive_from_sock(struct tipc_conn *con)
313{
Jon Maloy414574a2018-02-15 10:40:45 +0100314 struct tipc_server *srv = con->server;
Jon Maloye88f2be2018-01-15 17:56:28 +0100315 struct sock *sk = con->sock->sk;
Jon Maloye88f2be2018-01-15 17:56:28 +0100316 struct msghdr msg = {};
Jon Maloy414574a2018-02-15 10:40:45 +0100317 struct tipc_subscr s;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400318 struct kvec iov;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400319 int ret;
320
Jon Maloy414574a2018-02-15 10:40:45 +0100321 iov.iov_base = &s;
322 iov.iov_len = sizeof(s);
Jon Maloyc901d262018-02-15 10:40:43 +0100323 msg.msg_name = NULL;
Al Virobc480272017-09-20 22:08:04 -0400324 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
325 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
Jon Maloy414574a2018-02-15 10:40:45 +0100326 if (ret == -EWOULDBLOCK)
327 return -EWOULDBLOCK;
328 if (ret > 0) {
329 read_lock_bh(&sk->sk_callback_lock);
330 ret = tipc_con_rcv_sub(srv, con, &s);
331 read_unlock_bh(&sk->sk_callback_lock);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400332 }
Jon Maloye88f2be2018-01-15 17:56:28 +0100333 if (ret < 0)
Ying Xuec5fa7b32013-06-17 10:54:39 -0400334 tipc_close_conn(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400335
336 return ret;
337}
338
339static int tipc_accept_from_sock(struct tipc_conn *con)
340{
Ying Xuec5fa7b32013-06-17 10:54:39 -0400341 struct socket *sock = con->sock;
342 struct socket *newsock;
343 struct tipc_conn *newcon;
344 int ret;
345
Ying Xue76100a82015-03-18 09:32:57 +0800346 ret = kernel_accept(sock, &newsock, O_NONBLOCK);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400347 if (ret < 0)
348 return ret;
349
350 newcon = tipc_alloc_conn(con->server);
351 if (IS_ERR(newcon)) {
352 ret = PTR_ERR(newcon);
353 sock_release(newsock);
354 return ret;
355 }
356
357 newcon->rx_action = tipc_receive_from_sock;
358 tipc_register_callbacks(newsock, newcon);
359
Ying Xuec5fa7b32013-06-17 10:54:39 -0400360 /* Wake up receive process in case of 'SYN+' message */
David S. Miller676d2362014-04-11 16:15:36 -0400361 newsock->sk->sk_data_ready(newsock->sk);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400362 return ret;
363}
364
/* tipc_create_listen_sock - create, bind and listen on the topology
 * server's own TIPC socket, and adopt it into listener connection @con.
 * Returns the listening socket or NULL on failure.
 */
static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = NULL;
	int imp = TIPC_CRITICAL_IMPORTANCE;
	int ret;

	ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
	if (ret < 0)
		return NULL;
	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&imp, sizeof(imp));
	if (ret < 0)
		goto create_err;
	ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
	if (ret < 0)
		goto create_err;

	/* Must be set before listen(): incoming connects fire rx_action */
	con->rx_action = tipc_accept_from_sock;
	ret = kernel_listen(sock, 0);
	if (ret < 0)
		goto create_err;

	/* As server's listening socket owner and creator is the same module,
	 * we have to decrease TIPC module reference count to guarantee that
	 * it remains zero after the server socket is created, otherwise,
	 * executing "rmmod" command is unable to make TIPC module deleted
	 * after TIPC module is inserted successfully.
	 *
	 * However, the reference count is ever increased twice in
	 * sock_create_kern(): one is to increase the reference count of owner
	 * of TIPC socket's proto_ops struct; another is to increment the
	 * reference count of owner of TIPC proto struct. Therefore, we must
	 * decrement the module reference count twice to ensure that it keeps
	 * zero after server's listening socket is created. Of course, we
	 * must bump the module reference count twice as well before the socket
	 * is closed.
	 */
	module_put(sock->ops->owner);
	module_put(sock->sk->sk_prot_creator->owner);
	/* CF_SERVER tells tipc_conn_kref_release() to rebalance the counts */
	set_bit(CF_SERVER, &con->flags);

	return sock;

create_err:
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
	return NULL;
}
414
415static int tipc_open_listening_sock(struct tipc_server *s)
416{
417 struct socket *sock;
418 struct tipc_conn *con;
419
420 con = tipc_alloc_conn(s);
421 if (IS_ERR(con))
422 return PTR_ERR(con);
423
424 sock = tipc_create_listen_sock(con);
Ying Xuec756891a2013-08-01 08:29:18 -0400425 if (!sock) {
426 idr_remove(&s->conn_idr, con->conid);
427 s->idr_in_use--;
428 kfree(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400429 return -EINVAL;
Ying Xuec756891a2013-08-01 08:29:18 -0400430 }
Ying Xuec5fa7b32013-06-17 10:54:39 -0400431
432 tipc_register_callbacks(sock, con);
433 return 0;
434}
435
Ying Xuec5fa7b32013-06-17 10:54:39 -0400436static void tipc_clean_outqueues(struct tipc_conn *con)
437{
438 struct outqueue_entry *e, *safe;
439
440 spin_lock_bh(&con->outqueue_lock);
441 list_for_each_entry_safe(e, safe, &con->outqueue, list) {
442 list_del(&e->list);
Jon Maloy414574a2018-02-15 10:40:45 +0100443 kfree(e);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400444 }
445 spin_unlock_bh(&con->outqueue_lock);
446}
447
Jon Maloy414574a2018-02-15 10:40:45 +0100448/* tipc_conn_queue_evt - interrupt level call from a subscription instance
449 * The queued job is launched in tipc_send_to_sock()
450 */
451void tipc_conn_queue_evt(struct tipc_server *s, int conid,
452 u32 event, struct tipc_event *evt)
Ying Xuec5fa7b32013-06-17 10:54:39 -0400453{
454 struct outqueue_entry *e;
455 struct tipc_conn *con;
456
457 con = tipc_conn_lookup(s, conid);
458 if (!con)
Jon Maloy414574a2018-02-15 10:40:45 +0100459 return;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400460
Jon Maloy414574a2018-02-15 10:40:45 +0100461 if (!connected(con))
462 goto err;
Parthasarathy Bhuvaragan4c887aa2017-01-24 13:00:47 +0100463
Jon Maloy414574a2018-02-15 10:40:45 +0100464 e = kmalloc(sizeof(*e), GFP_ATOMIC);
465 if (!e)
466 goto err;
467 e->inactive = (event == TIPC_SUBSCR_TIMEOUT);
468 memcpy(&e->evt, evt, sizeof(*evt));
Ying Xuec5fa7b32013-06-17 10:54:39 -0400469 spin_lock_bh(&con->outqueue_lock);
470 list_add_tail(&e->list, &con->outqueue);
471 spin_unlock_bh(&con->outqueue_lock);
472
Jon Maloy414574a2018-02-15 10:40:45 +0100473 if (queue_work(s->send_wq, &con->swork))
474 return;
475err:
476 conn_put(con);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400477}
478
Jon Maloy232d07b2018-01-08 21:03:30 +0100479bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
480 u32 upper, u32 filter, int *conid)
Jon Maloy14c04492017-10-13 11:04:17 +0200481{
Jon Maloy14c04492017-10-13 11:04:17 +0200482 struct tipc_subscr sub;
Jon Maloy14c04492017-10-13 11:04:17 +0200483 struct tipc_conn *con;
Jon Maloydf79d042018-02-15 10:40:44 +0100484 int rc;
Jon Maloy14c04492017-10-13 11:04:17 +0200485
486 sub.seq.type = type;
487 sub.seq.lower = lower;
488 sub.seq.upper = upper;
489 sub.timeout = TIPC_WAIT_FOREVER;
Jon Maloy83485002018-01-08 21:03:29 +0100490 sub.filter = filter;
Jon Maloy14c04492017-10-13 11:04:17 +0200491 *(u32 *)&sub.usr_handle = port;
492
493 con = tipc_alloc_conn(tipc_topsrv(net));
Dan Carpenterc75e4272017-10-18 10:48:25 +0300494 if (IS_ERR(con))
Jon Maloy14c04492017-10-13 11:04:17 +0200495 return false;
496
497 *conid = con->conid;
Jon Maloy14c04492017-10-13 11:04:17 +0200498 con->sock = NULL;
Jon Maloy414574a2018-02-15 10:40:45 +0100499 rc = tipc_con_rcv_sub(tipc_topsrv(net), con, &sub);
Jon Maloydf79d042018-02-15 10:40:44 +0100500 if (rc < 0)
501 tipc_close_conn(con);
502 return !rc;
Jon Maloy14c04492017-10-13 11:04:17 +0200503}
504
/* tipc_topsrv_kern_unsubscr - tear down a kernel-internal subscription
 * connection created by tipc_topsrv_kern_subscr().
 */
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
{
	struct tipc_conn *con;

	con = tipc_conn_lookup(tipc_topsrv(net), conid);
	if (!con)
		return;

	test_and_clear_bit(CF_CONNECTED, &con->flags);
	tipc_con_delete_sub(con, NULL);
	/* Two puts: one for the lookup reference above, one for the
	 * connection's initial reference (kernel connections have no
	 * socket, so the tipc_close_conn() path is not used here).
	 */
	conn_put(con);
	conn_put(con);
}
518
519static void tipc_send_kern_top_evt(struct net *net, struct tipc_event *evt)
520{
521 u32 port = *(u32 *)&evt->s.usr_handle;
522 u32 self = tipc_own_addr(net);
523 struct sk_buff_head evtq;
524 struct sk_buff *skb;
525
526 skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
527 self, self, port, port, 0);
528 if (!skb)
529 return;
530 msg_set_dest_droppable(buf_msg(skb), true);
531 memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
532 skb_queue_head_init(&evtq);
533 __skb_queue_tail(&evtq, skb);
534 tipc_sk_rcv(net, &evtq);
535}
536
Ying Xuec5fa7b32013-06-17 10:54:39 -0400537static void tipc_send_to_sock(struct tipc_conn *con)
538{
Jon Maloydf79d042018-02-15 10:40:44 +0100539 struct list_head *queue = &con->outqueue;
540 struct tipc_server *srv = con->server;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400541 struct outqueue_entry *e;
Jon Maloy14c04492017-10-13 11:04:17 +0200542 struct tipc_event *evt;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400543 struct msghdr msg;
Jon Maloy414574a2018-02-15 10:40:45 +0100544 struct kvec iov;
Jon Maloy14c04492017-10-13 11:04:17 +0200545 int count = 0;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400546 int ret;
547
548 spin_lock_bh(&con->outqueue_lock);
Jon Maloydf79d042018-02-15 10:40:44 +0100549
550 while (!list_empty(queue)) {
551 e = list_first_entry(queue, struct outqueue_entry, list);
Jon Maloy414574a2018-02-15 10:40:45 +0100552 evt = &e->evt;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400553 spin_unlock_bh(&con->outqueue_lock);
554
Jon Maloy414574a2018-02-15 10:40:45 +0100555 if (e->inactive)
Jon Maloydf79d042018-02-15 10:40:44 +0100556 tipc_con_delete_sub(con, &evt->s);
Jon Maloy414574a2018-02-15 10:40:45 +0100557
Jon Maloydf79d042018-02-15 10:40:44 +0100558 memset(&msg, 0, sizeof(msg));
559 msg.msg_flags = MSG_DONTWAIT;
Jon Maloy414574a2018-02-15 10:40:45 +0100560 iov.iov_base = evt;
561 iov.iov_len = sizeof(*evt);
562 msg.msg_name = NULL;
Jon Maloydf79d042018-02-15 10:40:44 +0100563
Jon Maloy14c04492017-10-13 11:04:17 +0200564 if (con->sock) {
Jon Maloy414574a2018-02-15 10:40:45 +0100565 ret = kernel_sendmsg(con->sock, &msg, &iov,
566 1, sizeof(*evt));
Jon Maloy14c04492017-10-13 11:04:17 +0200567 if (ret == -EWOULDBLOCK || ret == 0) {
568 cond_resched();
569 goto out;
570 } else if (ret < 0) {
Jon Maloy414574a2018-02-15 10:40:45 +0100571 goto err;
Jon Maloy14c04492017-10-13 11:04:17 +0200572 }
573 } else {
Jon Maloydf79d042018-02-15 10:40:44 +0100574 tipc_send_kern_top_evt(srv->net, evt);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400575 }
Jon Maloy27469b72018-02-15 10:40:42 +0100576
Ying Xuec5fa7b32013-06-17 10:54:39 -0400577 /* Don't starve users filling buffers */
578 if (++count >= MAX_SEND_MSG_COUNT) {
579 cond_resched();
580 count = 0;
581 }
Ying Xuec5fa7b32013-06-17 10:54:39 -0400582 spin_lock_bh(&con->outqueue_lock);
583 list_del(&e->list);
Jon Maloy414574a2018-02-15 10:40:45 +0100584 kfree(e);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400585 }
586 spin_unlock_bh(&con->outqueue_lock);
587out:
588 return;
Jon Maloy414574a2018-02-15 10:40:45 +0100589err:
Ying Xuec5fa7b32013-06-17 10:54:39 -0400590 tipc_close_conn(con);
591}
592
593static void tipc_recv_work(struct work_struct *work)
594{
595 struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
596 int count = 0;
597
Jon Maloydf79d042018-02-15 10:40:44 +0100598 while (connected(con)) {
Ying Xuec5fa7b32013-06-17 10:54:39 -0400599 if (con->rx_action(con))
600 break;
601
602 /* Don't flood Rx machine */
603 if (++count >= MAX_RECV_MSG_COUNT) {
604 cond_resched();
605 count = 0;
606 }
607 }
608 conn_put(con);
609}
610
611static void tipc_send_work(struct work_struct *work)
612{
613 struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
614
Jon Maloydf79d042018-02-15 10:40:44 +0100615 if (connected(con))
Ying Xuec5fa7b32013-06-17 10:54:39 -0400616 tipc_send_to_sock(con);
617
618 conn_put(con);
619}
620
/* tipc_work_stop - flush and destroy both server workqueues */
static void tipc_work_stop(struct tipc_server *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}
626
627static int tipc_work_start(struct tipc_server *s)
628{
Parthasarathy Bhuvaragan06c85812016-02-02 10:52:17 +0100629 s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400630 if (!s->rcv_wq) {
631 pr_err("can't start tipc receive workqueue\n");
632 return -ENOMEM;
633 }
634
Parthasarathy Bhuvaragan06c85812016-02-02 10:52:17 +0100635 s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
Ying Xuec5fa7b32013-06-17 10:54:39 -0400636 if (!s->send_wq) {
637 pr_err("can't start tipc send workqueue\n");
638 destroy_workqueue(s->rcv_wq);
639 return -ENOMEM;
640 }
641
642 return 0;
643}
644
645int tipc_server_start(struct tipc_server *s)
646{
647 int ret;
648
649 spin_lock_init(&s->idr_lock);
650 idr_init(&s->conn_idr);
651 s->idr_in_use = 0;
652
Ying Xuec5fa7b32013-06-17 10:54:39 -0400653 ret = tipc_work_start(s);
Jon Maloy414574a2018-02-15 10:40:45 +0100654 if (ret < 0)
Ying Xuec5fa7b32013-06-17 10:54:39 -0400655 return ret;
Jon Maloy414574a2018-02-15 10:40:45 +0100656
Ying Xuec756891a2013-08-01 08:29:18 -0400657 ret = tipc_open_listening_sock(s);
Jon Maloy414574a2018-02-15 10:40:45 +0100658 if (ret < 0)
Ying Xuec756891a2013-08-01 08:29:18 -0400659 tipc_work_stop(s);
Jon Maloy414574a2018-02-15 10:40:45 +0100660
Ying Xuec756891a2013-08-01 08:29:18 -0400661 return ret;
Ying Xuec5fa7b32013-06-17 10:54:39 -0400662}
663
/* tipc_server_stop - close every remaining connection, stop the
 * workqueues and destroy the idr. The idr lock is dropped around each
 * tipc_close_conn() call because closing may release the connection and
 * re-take the lock in tipc_conn_kref_release().
 */
void tipc_server_stop(struct tipc_server *s)
{
	struct tipc_conn *con;
	int id;

	spin_lock_bh(&s->idr_lock);
	/* Loop until no registered connections remain; idr_in_use is
	 * decremented by tipc_conn_kref_release().
	 */
	for (id = 0; s->idr_in_use; id++) {
		con = idr_find(&s->conn_idr, id);
		if (con) {
			spin_unlock_bh(&s->idr_lock);
			tipc_close_conn(con);
			spin_lock_bh(&s->idr_lock);
		}
	}
	spin_unlock_bh(&s->idr_lock);

	tipc_work_stop(s);
	idr_destroy(&s->conn_idr);
}