/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function,
 * its verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto, so that the verifier can properly check
 * the arguments.
 *
 * Map implementations rely on RCU in their lookup/update/delete
 * methods, therefore eBPF programs must run under the RCU read lock
 * whenever they are allowed to access maps; hence the
 * rcu_read_lock_held() check in all three functions.
 */
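/* Illustrative wiring from a subsystem's get_func_proto() callback
 * (a sketch, not code from this file):
 *
 *	case BPF_FUNC_map_lookup_elem:
 *		return &bpf_map_lookup_elem_proto;
 */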
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

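/* bpf_user_rnd_u32() is defined in kernel/bpf/core.c and draws from a
 * separately seeded per-cpu prandom state, so BPF programs neither
 * consume from nor expose the state of the generic prandom_u32()
 * generator.
 */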
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

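/* Returns the thread group id (userspace "pid") in the upper 32 bits
 * and the thread id (userspace "tid") in the lower 32 bits.
 */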
BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

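/* Returns the gid in the upper 32 bits and the uid in the lower 32
 * bits, both translated into the initial user namespace.
 */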
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

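/* Fallback: a simple test-and-set spinlock built from generic atomics.
 * Wait until the word reads zero, then try to claim it with an xchg;
 * a non-zero return from the xchg means another CPU won the race, so
 * keep spinning.
 */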
static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

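/* Interrupts are disabled for the duration of the critical section.
 * The saved flags live in a per-cpu variable because a BPF program
 * has no way of carrying them from bpf_spin_lock() to
 * bpf_spin_unlock().
 */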
notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

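/* Copy a map value while holding its embedded bpf_spin_lock, for
 * callers outside of a BPF program (e.g. the syscall path).
 * ____bpf_spin_lock()/____bpf_spin_unlock() are the inner functions
 * that the BPF_CALL_1() definitions above expand to.
 */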
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgrp->kn->id.id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is currently unused, but provides room to
	 * extend the API.  The verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	void *ptr;

	storage = this_cpu_read(bpf_cgroup_storage[stype]);

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

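/* Common backend for the bpf_strtol()/bpf_strtoul() helpers: skip
 * leading whitespace, record an optional leading '-', copy at most
 * sizeof(str) - 1 bytes into a NUL-terminated scratch buffer and let
 * the kernel's _parse_integer() do the actual conversion.  The low
 * bits of @flags (BPF_STRTOX_BASE_MASK) select the numeric base.
 * Returns the number of bytes consumed on success, or a negative
 * error.
 */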
static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

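/* Signed variant: applies the optional '-' and rejects results whose
 * magnitude does not fit into a signed 64-bit value with -ERANGE.
 */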
static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

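/* The helper exposes a long-sized result, so the conversion succeeds
 * only if the parsed value also fits into a (possibly 32-bit) long.
 */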
BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

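/* Unsigned variant: a leading '-' is rejected with -EINVAL rather
 * than being wrapped around.
 */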
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif