// SPDX-License-Identifier: GPL-2.0-or-later
/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

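/*
 * Each socket buffer is accounted against one of two debugging counters:
 * trace ops at or after rxrpc_skb_tx_cleaned refer to Tx-side buffers and
 * are counted in rxrpc_n_tx_skbs; everything else is an Rx-side buffer and
 * is counted in rxrpc_n_rx_skbs.  Each helper below also passes its
 * caller's return address to the tracepoint so the trace shows where the
 * operation took place.
 */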
#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));
	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n = atomic_read(select_skb_count(op));
		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));
	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
	skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n;
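		/* Sanity check against use-after-free: the refcount should
		 * not look like freed, poisoned slab memory (see
		 * CHECK_SLAB_OKAY() in ar-internal.h).
		 */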
		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(select_skb_count(op));
		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;
	while ((skb = skb_dequeue((list))) != NULL) {
		int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
		trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
				refcount_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}
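/*
 * Illustrative sketch only (not part of the original file): one way a
 * caller might pair the helpers above when it pulls a packet off a queue
 * and then discards it.  The function name and the rxrpc_skb_rx_seen /
 * rxrpc_skb_rx_freed trace values are assumptions for this example; the
 * real callers live elsewhere in the rxrpc code.
 */
static inline void rxrpc_example_discard(struct sk_buff_head *queue)
{
	struct sk_buff *skb = skb_dequeue(queue);

	if (!skb)
		return;

	/* Note that the skb has re-emerged from the queue; this only reads
	 * the current Rx counter, it does not change it.
	 */
	rxrpc_see_skb(skb, rxrpc_skb_rx_seen);

	/* Release the accounting taken when the skb was received: this
	 * decrements the Rx counter and drops our reference, freeing the
	 * skb if that was the last one.
	 */
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
}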