#ifndef __LINUX_MROUTE_BASE_H
#define __LINUX_MROUTE_BASE_H

#include <linux/netdevice.h>
#include <linux/rhashtable-types.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_notifier.h>
#include <net/ip_fib.h>

/**
 * struct vif_device - interface representor for multicast routing
 * @dev: network device being used
 * @bytes_in: statistic; bytes ingressing
 * @bytes_out: statistic; bytes egressing
 * @pkt_in: statistic; packets ingressing
 * @pkt_out: statistic; packets egressing
 * @rate_limit: Traffic shaping (NI)
 * @threshold: TTL threshold
 * @flags: Control flags
 * @link: Physical interface index
 * @dev_parent_id: device parent id
 * @local: Local address
 * @remote: Remote address for tunnels
 */
struct vif_device {
	struct net_device *dev;
	unsigned long bytes_in, bytes_out;
	unsigned long pkt_in, pkt_out;
	unsigned long rate_limit;
	unsigned char threshold;
	unsigned short flags;
	int link;

	/* Currently only used by ipmr */
	struct netdev_phys_item_id dev_parent_id;
	__be32 local, remote;
};

struct vif_entry_notifier_info {
	struct fib_notifier_info info;
	struct net_device *dev;
	unsigned short vif_index;
	unsigned short vif_flags;
	u32 tb_id;
};

static inline int mr_call_vif_notifier(struct notifier_block *nb,
				       struct net *net,
				       unsigned short family,
				       enum fib_event_type event_type,
				       struct vif_device *vif,
				       unsigned short vif_index, u32 tb_id)
{
	struct vif_entry_notifier_info info = {
		.info = {
			.family = family,
			.net = net,
		},
		.dev = vif->dev,
		.vif_index = vif_index,
		.vif_flags = vif->flags,
		.tb_id = tb_id,
	};

	return call_fib_notifier(nb, net, event_type, &info.info);
}

static inline int mr_call_vif_notifiers(struct net *net,
					unsigned short family,
					enum fib_event_type event_type,
					struct vif_device *vif,
					unsigned short vif_index, u32 tb_id,
					unsigned int *ipmr_seq)
{
	struct vif_entry_notifier_info info = {
		.info = {
			.family = family,
			.net = net,
		},
		.dev = vif->dev,
		.vif_index = vif_index,
		.vif_flags = vif->flags,
		.tb_id = tb_id,
	};

	ASSERT_RTNL();
	(*ipmr_seq)++;
	return call_fib_notifiers(net, event_type, &info.info);
}
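
/* Usage sketch (illustrative, not a definitive call site): a protocol that
 * adds a vif under RTNL would typically bump its notifier sequence and
 * notify listeners roughly as follows, assuming an IPv4 caller:
 *
 *	mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, FIB_EVENT_VIF_ADD,
 *			      &mrt->vif_table[vifi], vifi, mrt->id,
 *			      &net->ipv4.ipmr_seq);
 */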

#ifndef MAXVIFS
/* This one is nasty; the value is defined in uapi using different symbols
 * for mroute and mroute6, but both map to the same value of 32.
 */
#define MAXVIFS	32
#endif

#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
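
/* Iteration sketch (illustrative): callers walking the vif table are
 * expected to skip unused slots, e.g.:
 *
 *	for (ct = 0; ct < mrt->maxvif; ct++) {
 *		if (!VIF_EXISTS(mrt, ct))
 *			continue;
 *		...
 *	}
 */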

/* mfc_flags:
 * MFC_STATIC - the entry was added statically (not by a routing daemon)
 * MFC_OFFLOAD - the entry was offloaded to the hardware
 */
enum {
	MFC_STATIC = BIT(0),
	MFC_OFFLOAD = BIT(1),
};

/**
 * struct mr_mfc - common multicast routing entries
 * @mnode: rhashtable list
 * @mfc_parent: source interface (iif)
 * @mfc_flags: entry flags
 * @expires: unresolved entry expire time
 * @unresolved: unresolved cached skbs
 * @last_assert: time of last assert
 * @minvif: minimum VIF id
 * @maxvif: maximum VIF id
 * @bytes: bytes that have passed for this entry
 * @pkt: packets that have passed for this entry
 * @wrong_if: number of wrong source interface hits
 * @lastuse: time of last use of the group (traffic or update)
 * @ttls: OIF TTL threshold array
 * @refcount: reference count for this entry
 * @list: global entry list
 * @rcu: used for entry destruction
 * @free: Operation used for freeing an entry under RCU
 */
struct mr_mfc {
	struct rhlist_head mnode;
	unsigned short mfc_parent;
	int mfc_flags;

	union {
		struct {
			unsigned long expires;
			struct sk_buff_head unresolved;
		} unres;
		struct {
			unsigned long last_assert;
			int minvif;
			int maxvif;
			unsigned long bytes;
			unsigned long pkt;
			unsigned long wrong_if;
			unsigned long lastuse;
			unsigned char ttls[MAXVIFS];
			refcount_t refcount;
		} res;
	} mfc_un;
	struct list_head list;
	struct rcu_head rcu;
	void (*free)(struct rcu_head *head);
};

static inline void mr_cache_put(struct mr_mfc *c)
{
	if (refcount_dec_and_test(&c->mfc_un.res.refcount))
		call_rcu(&c->rcu, c->free);
}

static inline void mr_cache_hold(struct mr_mfc *c)
{
	refcount_inc(&c->mfc_un.res.refcount);
}
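
/* Usage sketch (illustrative): a caller that keeps a resolved entry around
 * beyond the lookup is expected to balance the reference count:
 *
 *	mr_cache_hold(c);	// take a reference while the entry is in use
 *	...
 *	mr_cache_put(c);	// drop it; the entry is freed via call_rcu()
 *				// once the count reaches zero
 */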

struct mfc_entry_notifier_info {
	struct fib_notifier_info info;
	struct mr_mfc *mfc;
	u32 tb_id;
};

static inline int mr_call_mfc_notifier(struct notifier_block *nb,
				       struct net *net,
				       unsigned short family,
				       enum fib_event_type event_type,
				       struct mr_mfc *mfc, u32 tb_id)
{
	struct mfc_entry_notifier_info info = {
		.info = {
			.family = family,
			.net = net,
		},
		.mfc = mfc,
		.tb_id = tb_id
	};

	return call_fib_notifier(nb, net, event_type, &info.info);
}

static inline int mr_call_mfc_notifiers(struct net *net,
					unsigned short family,
					enum fib_event_type event_type,
					struct mr_mfc *mfc, u32 tb_id,
					unsigned int *ipmr_seq)
{
	struct mfc_entry_notifier_info info = {
		.info = {
			.family = family,
			.net = net,
		},
		.mfc = mfc,
		.tb_id = tb_id
	};

	ASSERT_RTNL();
	(*ipmr_seq)++;
	return call_fib_notifiers(net, event_type, &info.info);
}

struct mr_table;

/**
 * struct mr_table_ops - callbacks and info for protocol-specific ops
 * @rht_params: parameters for accessing the MFC hash
 * @cmparg_any: a hash key to be used for matching on (*,*) routes
 */
struct mr_table_ops {
	const struct rhashtable_params *rht_params;
	void *cmparg_any;
};
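
/* Definition sketch (names and field choices illustrative): a protocol
 * would typically provide rhashtable parameters keyed on its own cache
 * comparison argument, plus a (*,*) key, roughly:
 *
 *	static const struct rhashtable_params foo_rht_params = {
 *		.head_offset	= offsetof(struct mr_mfc, mnode),
 *		.key_offset	= offsetof(struct foo_mfc_cache, cmparg),
 *		.key_len	= sizeof(struct foo_mfc_cache_cmp_arg),
 *		.automatic_shrinking = true,
 *	};
 *
 *	static struct mr_table_ops foo_mr_table_ops = {
 *		.rht_params	= &foo_rht_params,
 *		.cmparg_any	= &foo_mfc_cache_cmparg_any,
 *	};
 */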

/**
 * struct mr_table - a multicast routing table
 * @list: entry within a list of multicast routing tables
 * @net: net where this table belongs
 * @ops: protocol specific operations
 * @id: identifier of the table
 * @mroute_sk: socket associated with the table
 * @ipmr_expire_timer: timer for handling unresolved routes
 * @mfc_unres_queue: list of unresolved MFC entries
 * @vif_table: array containing all possible vifs
 * @mfc_hash: Hash table of all resolved routes for easy lookup
 * @mfc_cache_list: list of resolved routes for possible traversal
 * @maxvif: Identifier of highest value vif currently in use
 * @cache_resolve_queue_len: current size of unresolved queue
 * @mroute_do_assert: Whether to inform userspace on wrong ingress
 * @mroute_do_pim: Whether to receive IGMP PIMv1
 * @mroute_do_wrvifwhole: Whether to report the full packet on a wrong vif
 * @mroute_reg_vif_num: PIM-device vif index
 */
struct mr_table {
	struct list_head list;
	possible_net_t net;
	struct mr_table_ops ops;
	u32 id;
	struct sock __rcu *mroute_sk;
	struct timer_list ipmr_expire_timer;
	struct list_head mfc_unres_queue;
	struct vif_device vif_table[MAXVIFS];
	struct rhltable mfc_hash;
	struct list_head mfc_cache_list;
	int maxvif;
	atomic_t cache_resolve_queue_len;
	bool mroute_do_assert;
	bool mroute_do_pim;
	bool mroute_do_wrvifwhole;
	int mroute_reg_vif_num;
};

#ifdef CONFIG_IP_MROUTE_COMMON
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask);

struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net));
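
/* Allocation sketch (illustrative names, error handling elided): a
 * protocol's per-netns init would typically create its default table via
 *
 *	mrt = mr_table_alloc(net, RT_TABLE_DEFAULT, &foo_mr_table_ops,
 *			     foo_expire_process, foo_table_set);
 */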

/* These actually return 'struct mr_mfc *', but to avoid the need for
 * explicit casts they simply return void *.
 */
void *mr_mfc_find_parent(struct mr_table *mrt,
			 void *hasharg, int parent);
void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi);
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);

int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm);
int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
		  struct netlink_callback *cb,
		  int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags),
		  spinlock_t *lock, struct fib_dump_filter *filter);
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock, struct fib_dump_filter *filter);

int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock);
#else
static inline void vif_device_init(struct vif_device *v,
				   struct net_device *dev,
				   unsigned long rate_limit,
				   unsigned char threshold,
				   unsigned short flags,
				   unsigned short get_iflink_mask)
{
}

static inline void *mr_mfc_find_parent(struct mr_table *mrt,
				       void *hasharg, int parent)
{
	return NULL;
}

static inline void *mr_mfc_find_any_parent(struct mr_table *mrt,
					   int vifi)
{
	return NULL;
}

static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
					     int vifi, void *hasharg)
{
	return NULL;
}

static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
				 struct mr_mfc *c, struct rtmsg *rtm)
{
	return -EINVAL;
}

static inline int
mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		 struct mr_table *(*iter)(struct net *net,
					  struct mr_table *mrt),
		 int (*fill)(struct mr_table *mrt,
			     struct sk_buff *skb,
			     u32 portid, u32 seq, struct mr_mfc *c,
			     int cmd, int flags),
		 spinlock_t *lock, struct fib_dump_filter *filter)
{
	return -EINVAL;
}

static inline int mr_dump(struct net *net, struct notifier_block *nb,
			  unsigned short family,
			  int (*rules_dump)(struct net *net,
					    struct notifier_block *nb),
			  struct mr_table *(*mr_iter)(struct net *net,
						      struct mr_table *mrt),
			  rwlock_t *mrt_lock)
{
	return -EINVAL;
}
#endif

static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
{
	return mr_mfc_find_parent(mrt, hasharg, -1);
}
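
/* Note: a parent of -1 asks mr_mfc_find_parent() to match on the hash
 * argument alone, i.e. to return the entry regardless of its incoming
 * interface.
 */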

#ifdef CONFIG_PROC_FS
struct mr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

struct mr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;

	/* Lock protecting the mr_table's unresolved queue */
	spinlock_t *lock;
};

#ifdef CONFIG_IP_MROUTE_COMMON
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos);

static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? mr_vif_seq_idx(seq_file_net(seq),
				     seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

/* These actually return 'struct mr_mfc *', but to avoid the need for
 * explicit casts they simply return void *.
 */
void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos);
void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos);

static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
				     struct mr_table *mrt, spinlock_t *lock)
{
	struct mr_mfc_iter *it = seq->private;

	it->mrt = mrt;
	it->cache = NULL;
	it->lock = lock;

	return *pos ? mr_mfc_seq_idx(seq_file_net(seq),
				     seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct mr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(it->lock);
	else if (it->cache == &mrt->mfc_cache_list)
		rcu_read_unlock();
}
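
/* Wiring sketch (names illustrative): a protocol's procfs code would
 * typically plug these helpers into its seq_operations, providing only
 * the protocol-specific start and show callbacks:
 *
 *	static const struct seq_operations foo_mfc_seq_ops = {
 *		.start = foo_mfc_seq_start,	// wraps mr_mfc_seq_start()
 *		.next  = mr_mfc_seq_next,
 *		.stop  = mr_mfc_seq_stop,
 *		.show  = foo_mfc_seq_show,
 *	};
 */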
#else
static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
				   loff_t pos)
{
	return NULL;
}

static inline void *mr_vif_seq_next(struct seq_file *seq,
				    void *v, loff_t *pos)
{
	return NULL;
}

static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	return NULL;
}

static inline void *mr_mfc_seq_idx(struct net *net,
				   struct mr_mfc_iter *it, loff_t pos)
{
	return NULL;
}

static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v,
				    loff_t *pos)
{
	return NULL;
}

static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
				     struct mr_table *mrt, spinlock_t *lock)
{
	return NULL;
}

static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
{
}
#endif
#endif
#endif