/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
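
/* Illustrative usage from the command line (example only; the object
 * file and section names below are placeholders, not part of this
 * module):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj cls.o sec classifier
 *
 * The "da" (direct-action) flag corresponds to TCA_BPF_FLAG_ACT_DIRECT
 * handled below, i.e. the program returns its TC verdict directly.
 */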

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

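/* Sanitize the return code of a direct-action program: only known TC
 * verdicts are passed through, anything else falls back to TC_ACT_UNSPEC
 * so that the next filter in the chain gets a chance to classify.
 */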
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

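/* Run each attached program in list order until one yields a verdict.
 * For ingress, the MAC header is pushed before the program runs and
 * pulled again afterwards, so programs see the same packet layout as
 * on egress.
 */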
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

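/* Classic BPF programs always carry their translated opcode array in
 * bpf_ops; an eBPF program loaded by fd does not, which is what tells
 * the two apart everywhere else in this file.
 */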
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

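/* Issue a single offload command (add/replace/destroy/stats) to the
 * hardware blocks bound to this classifier. For add/replace, a failure
 * is unwound with a destroy; if software fallback was disallowed via
 * SKIP_SW and no device accepted the program, the command fails.
 */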
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(prog->gen_flags);
	struct tc_cls_bpf_offload cls_bpf = {};
	int err;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = cmd;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (addorrep) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
			return err;
		} else if (err > 0) {
			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
		}
	}

	if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

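/* Decide which offload command a change translates to: REPLACE when the
 * old program was offloaded and the new one may go to hardware, DESTROY
 * of the old program when the new one is software only, or a plain ADD
 * when nothing was offloaded before.
 */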
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (!tc_skip_hw(prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (tc_skip_hw(prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return ret;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

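/* Tear down a hardware-offloaded program, e.g. when the filter is
 * deleted. Failure is only logged since there is nothing left to
 * recover at this point.
 */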
static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

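/* Allocate the per-tcf_proto head that anchors the RCU program list
 * and the IDR used for handle allocation.
 */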
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

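/* Filter teardown is deferred twice: an RCU grace period protects
 * readers on the classification fast path, and the actual free then
 * runs from a workqueue because tcf_exts cleanup needs the RTNL lock,
 * which must not be taken from RCU callback context.
 */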
static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove_ext(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

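/* Attach a classic BPF program: the raw sock_filter array from user
 * space is validated, copied and translated into an internal bpf_prog
 * via bpf_prog_create(). The original opcodes are kept around in
 * bpf_ops for dumping the filter back to user space.
 */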
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

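/* Attach an eBPF program by file descriptor: the fd must refer to a
 * program of type BPF_PROG_TYPE_SCHED_CLS, optionally bound to a device
 * when software execution is skipped. An optional name is kept only for
 * dumping.
 */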
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

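/* Parse and apply the mutually exclusive classic/eBPF attributes as
 * well as the per-filter flags. TCA_BPF_FLAG_ACT_DIRECT marks the
 * program as direct-action, i.e. it returns TC verdicts itself instead
 * of going through separate actions.
 */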
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

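/* Create or replace a filter. A handle of zero means the kernel picks
 * one from the IDR; an explicit handle is reserved unless the filter
 * being replaced already owns it. On replace, the old program stays
 * visible to readers until the RCU callback fires.
 */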
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	unsigned long idr_index;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
				    1, 0x7FFFFFFF, GFP_KERNEL);
		if (ret)
			goto errout;
		prog->handle = idr_index;
	} else {
		if (!oldprog) {
			ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
					    handle, handle + 1, GFP_KERNEL);
			if (ret)
				goto errout;
		}
		prog->handle = handle;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace_ext(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove_ext(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

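/* Dump helpers: classic programs are dumped as their raw opcode array,
 * eBPF programs as name, id and tag so user space can correlate the
 * filter with the loaded program.
 */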
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);