blob: 479d15c9777016ba133723be2c94b1cfc3edfb91 [file] [log] [blame]
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
6
#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
14
Cong Wangb7394d22013-01-07 20:52:39 +000015union inet_addr {
16 __u32 all[4];
17 __be32 ip;
18 __be32 ip6[4];
19 struct in_addr in;
20 struct in6_addr in6;
21};
22
Linus Torvalds1da177e2005-04-16 15:20:36 -070023struct netpoll {
24 struct net_device *dev;
Stephen Hemmingerbf6bce72006-10-26 15:46:56 -070025 char dev_name[IFNAMSIZ];
26 const char *name;
Antonio Quartulli8fb479a2013-10-23 23:36:30 +020027 void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
28 int offset, int len);
Stephen Hemminger5de4a472006-10-26 15:46:55 -070029
Cong Wangb7394d22013-01-07 20:52:39 +000030 union inet_addr local_ip, remote_ip;
31 bool ipv6;
Linus Torvalds1da177e2005-04-16 15:20:36 -070032 u16 local_port, remote_port;
Stephen Hemminger09538642007-11-19 19:23:29 -080033 u8 remote_mac[ETH_ALEN];
Daniel Borkmann508e14b2010-01-12 14:27:30 +000034
35 struct list_head rx; /* rx_np list element */
Neil Horman2cde6ac2013-02-11 10:25:30 +000036 struct work_struct cleanup_work;
Jeff Moyer115c1d62005-06-22 22:05:31 -070037};
38
39struct netpoll_info {
Stephen Hemminger93ec2c72006-10-26 15:46:50 -070040 atomic_t refcnt;
Daniel Borkmann508e14b2010-01-12 14:27:30 +000041
Neil Hormanca99ca12013-02-05 08:05:43 +000042 unsigned long rx_flags;
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070043 spinlock_t rx_lock;
Neil Hormanbd7c4b62013-04-30 05:35:05 +000044 struct semaphore dev_lock;
Antonio Quartulli8fb479a2013-10-23 23:36:30 +020045 struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
Daniel Borkmann508e14b2010-01-12 14:27:30 +000046
Cong Wangb7394d22013-01-07 20:52:39 +000047 struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
Stephen Hemmingerb6cd27e2006-10-26 15:46:51 -070048 struct sk_buff_head txq;
Daniel Borkmann508e14b2010-01-12 14:27:30 +000049
David Howells6d5aefb2006-12-05 19:36:26 +000050 struct delayed_work tx_work;
WANG Cong0e34e932010-05-06 00:47:21 -070051
52 struct netpoll *netpoll;
Amerigo Wang38e6bc12012-08-10 01:24:38 +000053 struct rcu_head rcu;
Linus Torvalds1da177e2005-04-16 15:20:36 -070054};
55
/*
 * Pause/resume netpoll receive processing on @dev; compiled to no-ops
 * when CONFIG_NETPOLL is not enabled.
 */
#ifdef CONFIG_NETPOLL
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
static inline void netpoll_rx_disable(struct net_device *dev) { return; }
static inline void netpoll_rx_enable(struct net_device *dev) { return; }
#endif
63
Linus Torvalds1da177e2005-04-16 15:20:36 -070064void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
Satyam Sharma0bcc1812007-08-10 15:35:05 -070065void netpoll_print_options(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070066int netpoll_parse_options(struct netpoll *np, char *opt);
Amerigo Wang47be03a22012-08-10 01:24:37 +000067int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070068int netpoll_setup(struct netpoll *np);
69int netpoll_trap(void);
70void netpoll_set_trap(int trap);
Herbert Xu8fdd95e2010-06-10 16:12:48 +000071void __netpoll_cleanup(struct netpoll *np);
Neil Horman2cde6ac2013-02-11 10:25:30 +000072void __netpoll_free_async(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070073void netpoll_cleanup(struct netpoll *np);
Amerigo Wang57c5d462012-08-10 01:24:40 +000074int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
Neil Hormanc2355e12010-10-13 16:01:49 +000075void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
76 struct net_device *dev);
77static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
78{
Amerigo Wang28996562012-08-10 01:24:42 +000079 unsigned long flags;
80 local_irq_save(flags);
Neil Hormanc2355e12010-10-13 16:01:49 +000081 netpoll_send_skb_on_dev(np, skb, np->dev);
Amerigo Wang28996562012-08-10 01:24:42 +000082 local_irq_restore(flags);
Neil Hormanc2355e12010-10-13 16:01:49 +000083}
84
Eric W. Biedermanff607632014-03-14 20:47:49 -070085#ifdef CONFIG_NETPOLL_TRAP
86static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
87{
88 return !list_empty(&npinfo->rx_np);
89}
90#else
91static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
92{
93 return false;
94}
95#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070096
97#ifdef CONFIG_NETPOLL
Amerigo Wang77ab8a52012-08-10 01:24:46 +000098static inline bool netpoll_rx_on(struct sk_buff *skb)
Amerigo Wang91fe4a42012-08-10 01:24:41 +000099{
100 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
101
Eric W. Biedermanff607632014-03-14 20:47:49 -0700102 return npinfo && (netpoll_rx_processing(npinfo) || npinfo->rx_flags);
Amerigo Wang91fe4a42012-08-10 01:24:41 +0000103}
104
David S. Millerffb27362010-05-06 01:20:10 -0700105static inline bool netpoll_rx(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106{
Herbert Xude85d992010-06-10 16:12:44 +0000107 struct netpoll_info *npinfo;
Jeff Moyerfbeec2e2005-06-22 22:05:59 -0700108 unsigned long flags;
David S. Millerffb27362010-05-06 01:20:10 -0700109 bool ret = false;
Jeff Moyer115c1d62005-06-22 22:05:31 -0700110
Herbert Xuf0f9dea2010-09-17 16:55:03 -0700111 local_irq_save(flags);
Herbert Xude85d992010-06-10 16:12:44 +0000112
Amerigo Wang91fe4a42012-08-10 01:24:41 +0000113 if (!netpoll_rx_on(skb))
Herbert Xude85d992010-06-10 16:12:44 +0000114 goto out;
Jeff Moyer115c1d62005-06-22 22:05:31 -0700115
Amerigo Wang91fe4a42012-08-10 01:24:41 +0000116 npinfo = rcu_dereference_bh(skb->dev->npinfo);
Herbert Xuf0f9dea2010-09-17 16:55:03 -0700117 spin_lock(&npinfo->rx_lock);
Eric W. Biedermanff607632014-03-14 20:47:49 -0700118 /* check rx_processing again with the lock held */
119 if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
David S. Millerffb27362010-05-06 01:20:10 -0700120 ret = true;
Herbert Xuf0f9dea2010-09-17 16:55:03 -0700121 spin_unlock(&npinfo->rx_lock);
Jeff Moyerfbeec2e2005-06-22 22:05:59 -0700122
Herbert Xude85d992010-06-10 16:12:44 +0000123out:
Herbert Xuf0f9dea2010-09-17 16:55:03 -0700124 local_irq_restore(flags);
Jeff Moyerfbeec2e2005-06-22 22:05:59 -0700125 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126}
127
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700128static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700130 if (!list_empty(&skb->dev->napi_list))
131 return netpoll_rx(skb);
132 return 0;
133}
134
135static inline void *netpoll_poll_lock(struct napi_struct *napi)
136{
137 struct net_device *dev = napi->dev;
138
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700139 if (dev && dev->npinfo) {
140 spin_lock(&napi->poll_lock);
141 napi->poll_owner = smp_processor_id();
142 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700144 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145}
146
Matt Mackall53fb95d2005-08-11 19:27:43 -0700147static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700149 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -0700150
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700151 if (napi) {
152 napi->poll_owner = -1;
153 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 }
155}
156
Amerigo Wang77ab8a52012-08-10 01:24:46 +0000157static inline bool netpoll_tx_running(struct net_device *dev)
Herbert Xuc18370f2010-06-10 16:12:49 +0000158{
159 return irqs_disabled();
160}
161
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162#else
John W. Linville969a6e52010-08-10 16:24:41 -0700163static inline bool netpoll_rx(struct sk_buff *skb)
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700164{
Amerigo Wang77ab8a52012-08-10 01:24:46 +0000165 return false;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700166}
Amerigo Wang77ab8a52012-08-10 01:24:46 +0000167static inline bool netpoll_rx_on(struct sk_buff *skb)
Herbert Xud1c76af2009-03-16 10:50:02 -0700168{
Amerigo Wang77ab8a52012-08-10 01:24:46 +0000169 return false;
Herbert Xud1c76af2009-03-16 10:50:02 -0700170}
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700171static inline int netpoll_receive_skb(struct sk_buff *skb)
172{
173 return 0;
174}
175static inline void *netpoll_poll_lock(struct napi_struct *napi)
176{
177 return NULL;
178}
179static inline void netpoll_poll_unlock(void *have)
180{
181}
182static inline void netpoll_netdev_init(struct net_device *dev)
183{
184}
Amerigo Wang77ab8a52012-08-10 01:24:46 +0000185static inline bool netpoll_tx_running(struct net_device *dev)
Herbert Xuc18370f2010-06-10 16:12:49 +0000186{
Amerigo Wang77ab8a52012-08-10 01:24:46 +0000187 return false;
Herbert Xuc18370f2010-06-10 16:12:49 +0000188}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189#endif

#endif