#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index lets us get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic, and
 * being able to tell which taggings to undo makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;

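/*
 * Allocate a new audit_tree for pathname @s.  The caller gets the single
 * initial reference; every chunk that later points back at the tree takes
 * a reference of its own.
 */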
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

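/*
 * Release the tree references held in the chunk's owner slots and free
 * the chunk itself.
 */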
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

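/*
 * Allocate a chunk with @count owner slots.  The chunk starts out with a
 * single long-term reference in .refs; the embedded fsnotify mark's
 * destructor drops that reference through RCU (see __put_chunk()).
 */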
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

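/*
 * Map an inode to its hash bucket; the pointer is scaled down by
 * L1_CACHE_BYTES so that allocator alignment does not leave the low
 * bits constant across inodes.
 */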
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

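/*
 * node->index holds the slot number of a node within chunk->owners[] (the
 * MSB doubles as the 'will prune' flag), so stepping back by that many
 * entries from @p lands on owners[0], from which the chunk is recovered.
 */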
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

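/*
 * Detach owner *p from its chunk: the chunk attached to the inode is
 * replaced by a copy with one fewer owner slot (or removed outright if
 * @p was the last owner).  Called and returns with hash_lock held, but
 * drops it while the replacement is set up.
 */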
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
				     NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

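/*
 * Attach a fresh single-owner chunk to an inode that does not carry an
 * audit mark yet.
 */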
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
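/*
 * If the inode already carries an audit mark, the existing chunk is
 * replaced by a copy with one extra owner slot for @tree; otherwise a
 * new single-owner chunk is created.
 */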
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
				     old_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

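/*
 * Detach every rule that refers to the dying tree; fully set up rules
 * get a config-change record and are freed via RCU.
 */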
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

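/*
 * Untag every chunk whose 'will prune' bit is set.  The marked nodes are
 * first moved to the front of tree->chunks so they can be taken off one
 * by one; if the tree ends up with no root chunk, it is killed entirely.
 */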
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}

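/*
 * Re-resolve each tree's pathname, collect the mounts under it and drop
 * the chunks whose inode is no longer the root of one of those mounts.
 */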
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
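/*
 * If a tree with the same pathname is already on tree_list, the rule is
 * attached to it; otherwise the new tree is inserted, the mounts under
 * its path are collected and every mount root gets tagged.
 */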
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

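/*
 * Apply the tags of every tree whose path covers @old to the mount set
 * collected at @new, so both locations end up watched by the same rules.
 */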
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

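/* wake the background thread that does the actual pruning */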
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of the syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

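/*
 * The watched inode is going away: mark the chunk dead, unhash it, and
 * queue every tree rooted at this chunk for killing - either on the
 * current task's postponed list or via the prune thread.
 */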
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

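/*
 * Events themselves are ignored; the group exists only so that
 * freeing_mark tells us when a watched inode goes away.
 */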
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);