blob: 9ad5045b7c2f34e2fea3d5541e53c3401e5ab3ad [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
David Howells17926a72007-04-26 15:48:28 -07002/* ar-skbuff.c: socket buffer destruction handling
3 *
4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
David Howells17926a72007-04-26 15:48:28 -07006 */
7
Joe Perches9b6d5392016-06-02 12:08:52 -07008#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
David Howells17926a72007-04-26 15:48:28 -070010#include <linux/module.h>
11#include <linux/net.h>
12#include <linux/skbuff.h>
13#include <net/sock.h>
14#include <net/af_rxrpc.h>
15#include "ar-internal.h"
16
/*
 * Pick the Tx or Rx skb accounting counter for a tracepoint op: ops at or
 * above rxrpc_skb_tx_cleaned concern transmission skbs, the rest reception.
 * Note: (op) is parenthesized so expression arguments parse correctly.
 */
#define select_skb_count(op) ((op) >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
18
David Howells17926a72007-04-26 15:48:28 -070019/*
David Howells71f3ca42016-09-17 10:49:14 +010020 * Note the allocation or reception of a socket buffer.
David Howellsdf844fd2016-08-23 15:27:24 +010021 */
David Howells71f3ca42016-09-17 10:49:14 +010022void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
David Howellsdf844fd2016-08-23 15:27:24 +010023{
24 const void *here = __builtin_return_address(0);
David Howells71f3ca42016-09-17 10:49:14 +010025 int n = atomic_inc_return(select_skb_count(op));
Reshetova, Elena63354792017-06-30 13:07:58 +030026 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
David Howellsdf844fd2016-08-23 15:27:24 +010027}
28
29/*
30 * Note the re-emergence of a socket buffer from a queue or buffer.
31 */
David Howells71f3ca42016-09-17 10:49:14 +010032void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
David Howellsdf844fd2016-08-23 15:27:24 +010033{
34 const void *here = __builtin_return_address(0);
35 if (skb) {
David Howells71f3ca42016-09-17 10:49:14 +010036 int n = atomic_read(select_skb_count(op));
Reshetova, Elena63354792017-06-30 13:07:58 +030037 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
David Howellsdf844fd2016-08-23 15:27:24 +010038 }
39}
40
41/*
42 * Note the addition of a ref on a socket buffer.
43 */
David Howells71f3ca42016-09-17 10:49:14 +010044void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
David Howellsdf844fd2016-08-23 15:27:24 +010045{
46 const void *here = __builtin_return_address(0);
David Howells71f3ca42016-09-17 10:49:14 +010047 int n = atomic_inc_return(select_skb_count(op));
Reshetova, Elena63354792017-06-30 13:07:58 +030048 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
David Howellsdf844fd2016-08-23 15:27:24 +010049 skb_get(skb);
50}
51
52/*
53 * Note the destruction of a socket buffer.
54 */
David Howells71f3ca42016-09-17 10:49:14 +010055void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
David Howellsdf844fd2016-08-23 15:27:24 +010056{
57 const void *here = __builtin_return_address(0);
58 if (skb) {
59 int n;
60 CHECK_SLAB_OKAY(&skb->users);
David Howells71f3ca42016-09-17 10:49:14 +010061 n = atomic_dec_return(select_skb_count(op));
Reshetova, Elena63354792017-06-30 13:07:58 +030062 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
David Howellsdf844fd2016-08-23 15:27:24 +010063 kfree_skb(skb);
64 }
65}
66
67/*
68 * Clear a queue of socket buffers.
69 */
70void rxrpc_purge_queue(struct sk_buff_head *list)
71{
72 const void *here = __builtin_return_address(0);
73 struct sk_buff *skb;
74 while ((skb = skb_dequeue((list))) != NULL) {
David Howells71f3ca42016-09-17 10:49:14 +010075 int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
76 trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
Reshetova, Elena63354792017-06-30 13:07:58 +030077 refcount_read(&skb->users), n, here);
David Howellsdf844fd2016-08-23 15:27:24 +010078 kfree_skb(skb);
79 }
80}