/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
/*
 * Protocol-agnostic endpoint address: holds either an IPv4 or an IPv6
 * address in the same storage (see netpoll.ipv6 for which one is valid).
 * all[] provides raw 32-bit word access to whichever form is stored.
 */
union inet_addr {
	__u32		all[4];	/* raw word view of the address */
	__be32		ip;	/* IPv4, network byte order */
	__be32		ip6[4];	/* IPv6, network byte order */
	struct in_addr	in;	/* IPv4 as the socket-API struct */
	struct in6_addr	in6;	/* IPv6 as the socket-API struct */
};
22
/*
 * One netpoll client instance (e.g. a netconsole target): the local
 * device to transmit on plus the remote UDP endpoint to talk to.
 */
struct netpoll {
	struct net_device *dev;		/* device used for tx/rx */
	char dev_name[IFNAMSIZ];	/* interface name; presumably used to
					 * resolve @dev at setup time — see
					 * netpoll_setup() */
	const char *name;		/* client name (used in diagnostics) */
	/* optional callback invoked for received frames handed to this
	 * netpoll; registering one puts this netpoll on the rx_np list */
	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
			    int offset, int len);

	union inet_addr local_ip, remote_ip;	/* valid form chosen by @ipv6 */
	bool ipv6;			/* true: addresses above are IPv6 */
	u16 local_port, remote_port;	/* UDP port pair */
	u8 remote_mac[ETH_ALEN];	/* destination MAC address */

	struct list_head rx;		/* rx_np list element */
	struct work_struct cleanup_work; /* deferred teardown, see
					  * __netpoll_free_async() */
};
38
/*
 * Per-device netpoll state, reached via net_device->npinfo and shared
 * by every netpoll client attached to that device.  Freed via RCU.
 */
struct netpoll_info {
	atomic_t refcnt;		/* attached netpoll client count */

	spinlock_t rx_lock;		/* guards the rx path and rx_np list
					 * (taken with IRQs off, see
					 * netpoll_rx()) */
	struct semaphore dev_lock;	/* NOTE(review): purpose not visible
					 * here — presumably serializes device
					 * state changes against polling;
					 * confirm in net/core/netpoll.c */
	struct list_head rx_np;		/* netpolls that registered an rx_skb_hook */

	struct sk_buff_head neigh_tx;	/* list of neigh requests to reply to */
	struct sk_buff_head txq;	/* packets queued for transmission */

	struct delayed_work tx_work;	/* deferred worker draining txq */

	struct netpoll *netpoll;
	struct rcu_head rcu;		/* deferred free of this struct */
};
54
/*
 * Pause/resume netpoll rx processing on @dev around device state
 * changes.  With CONFIG_NETPOLL disabled both calls compile to no-ops.
 */
#ifdef CONFIG_NETPOLL
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
static inline void netpoll_rx_disable(struct net_device *dev) {}
static inline void netpoll_rx_enable(struct net_device *dev) {}
#endif
62
/* Transmit @len bytes of @msg as one or more UDP packets to remote_ip. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the parsed netpoll options (addresses, ports, device). */
void netpoll_print_options(struct netpoll *np);
/* Parse a config string (@opt) into @np; returns 0 on success. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Attach @np to @ndev; @gfp selects the allocation context. */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
/* Teardown counterparts; the async variant defers via cleanup_work. */
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Core rx handler; called from netpoll_rx() with rx_lock held. */
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
74static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
75{
Amerigo Wang28996562012-08-10 01:24:42 +000076 unsigned long flags;
77 local_irq_save(flags);
Neil Hormanc2355e12010-10-13 16:01:49 +000078 netpoll_send_skb_on_dev(np, skb, np->dev);
Amerigo Wang28996562012-08-10 01:24:42 +000079 local_irq_restore(flags);
Neil Hormanc2355e12010-10-13 16:01:49 +000080}
81
#ifdef CONFIG_NETPOLL_TRAP
/* Query/set the global rx "trap" state — semantics live in
 * net/core/netpoll.c; presumably nonzero means netpoll steals rx
 * packets from the stack. TODO confirm at the definition. */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* True when any netpoll on this device registered an rx_skb_hook,
 * i.e. the rx path has work to do. */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}
#else
/* CONFIG_NETPOLL_TRAP disabled: trapping is compiled out and the rx
 * path never claims packets. */
static inline int netpoll_trap(void)
{
	return 0;
}
static inline void netpoll_set_trap(int trap)
{
}
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return false;
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102
103#ifdef CONFIG_NETPOLL
Amerigo Wang77ab8a52012-08-10 01:24:46 +0000104static inline bool netpoll_rx_on(struct sk_buff *skb)
Amerigo Wang91fe4a42012-08-10 01:24:41 +0000105{
106 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
107
Eric W. Biedermanb6bacd52014-03-14 20:48:28 -0700108 return npinfo && netpoll_rx_processing(npinfo);
Amerigo Wang91fe4a42012-08-10 01:24:41 +0000109}
110
/*
 * Driver rx hook: offer @skb to netpoll.  Returns true when netpoll
 * consumed the packet and it must not continue up the normal stack.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	/* NOTE(review): IRQs are disabled for the whole check-and-consume
	 * sequence — presumably so rx_lock cannot deadlock against
	 * netpoll's own IRQ-context use; confirm in net/core/netpoll.c. */
	local_irq_save(flags);

	/* cheap unlocked check first: bail out for the common case of a
	 * device with no active netpoll rx clients */
	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_processing again with the lock held */
	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
133
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700134static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700136 if (!list_empty(&skb->dev->napi_list))
137 return netpoll_rx(skb);
138 return 0;
139}
140
141static inline void *netpoll_poll_lock(struct napi_struct *napi)
142{
143 struct net_device *dev = napi->dev;
144
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700145 if (dev && dev->npinfo) {
146 spin_lock(&napi->poll_lock);
147 napi->poll_owner = smp_processor_id();
148 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700150 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151}
152
Matt Mackall53fb95d2005-08-11 19:27:43 -0700153static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700155 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -0700156
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700157 if (napi) {
158 napi->poll_owner = -1;
159 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160 }
161}
162
/*
 * Heuristic "is netpoll currently transmitting?" check.
 * NOTE(review): this relies on netpoll tx running with IRQs disabled
 * (see netpoll_send_skb above), so any IRQs-off context on this CPU is
 * treated as netpoll tx — confirm callers tolerate false positives.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
167
#else
/*
 * CONFIG_NETPOLL disabled: every hook collapses to a constant no-op so
 * driver call sites compile away.
 * NOTE(review): netpoll_netdev_init() exists only in this branch — no
 * CONFIG_NETPOLL counterpart is visible in this header; confirm callers
 * are themselves conditional on !CONFIG_NETPOLL or that the real
 * definition lives elsewhere.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif

#endif /* _LINUX_NETPOLL_H */