// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        refcount_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        unsigned long key;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                      hash_lock
 * tree.rules anchors rule.rlist                                audit_filter_mutex
 * chunk.trees anchors tree.same_root                           hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                             RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                refcount_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (refcount_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

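/*
 * Allocate a chunk with room for @count owner slots and set up its
 * embedded fsnotify mark.  .refs starts at 1, matching the reference
 * that the live mark contributes (see the comment near the top of this
 * file); callers drop the mark's own initial reference once the chunk
 * has been hashed in or abandoned.
 */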
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_group);
        chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
        /* Use address pointed to by connector->obj as the key */
        return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
        unsigned long n = key / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct list_head *list;

        if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
                return;
        WARN_ON_ONCE(!chunk->key);
        list = chunk_hash(chunk->key);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        unsigned long key = inode_to_key(inode);
        struct list_head *list = chunk_hash(key);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                if (p->key == key) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return true;
        return false;
}

/* tagging and untagging inodes with trees */

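/*
 * Recover the chunk a node is embedded in: node->index (with the MSB
 * "will prune" flag masked off) is the node's slot in chunk->owners[],
 * so stepping back that many entries lands on owners[0].
 */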
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

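/*
 * Remove @p's owner from the chunk it sits in.  Called with hash_lock
 * held; the lock is dropped while the replacement chunk is allocated
 * and the fsnotify marks are swapped, and re-taken before returning.
 * If the last owner goes away the chunk is torn down entirely,
 * otherwise it is replaced by a copy with one fewer slot (or, if that
 * allocation fails, the slot is merely cleared in the Fallback path).
 */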
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        mutex_lock(&entry->group->mark_mutex);
        /*
         * mark_mutex protects mark from getting detached and thus also from
         * mark->connector->obj getting NULL.
         */
        if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
                mutex_unlock(&entry->group->mark_mutex);
                if (new)
                        fsnotify_put_mark(&new->mark);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                fsnotify_detach_mark(entry);
                mutex_unlock(&entry->group->mark_mutex);
                fsnotify_free_mark(entry);
                goto out;
        }

        if (!new)
                goto Fallback;

        if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
                                     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        new->key = chunk->key;
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        fsnotify_detach_mark(entry);
        mutex_unlock(&entry->group->mark_mutex);
        fsnotify_free_mark(entry);
        fsnotify_put_mark(&new->mark);  /* drop initial reference */
        goto out;

Fallback:
        // do the best we can
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        mutex_unlock(&entry->group->mark_mutex);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);

        if (!chunk) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                return -ENOMEM;
        }

        entry = &chunk->mark;
        if (fsnotify_add_inode_mark_locked(entry, inode, 0)) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }

        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                fsnotify_detach_mark(entry);
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_free_mark(entry);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        chunk->key = inode_to_key(inode);
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_tree_group->mark_mutex);
        fsnotify_put_mark(entry);       /* drop initial reference */
        return 0;
}

/* the first tagged inode becomes root of tree */
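/*
 * Tagging an already tagged inode means replacing its chunk with a new
 * one that has room for one more owner.  The existing owners and the
 * hash entry are transferred under hash_lock, and the hash list entry
 * is swapped with list_replace_rcu(), so concurrent RCU lookups see
 * either the old or the new chunk.
 */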
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        mutex_lock(&audit_tree_group->mark_mutex);
        old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
                                       audit_tree_group);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        mutex_unlock(&audit_tree_group->mark_mutex);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        /*
         * mark_mutex protects mark from getting detached and thus also from
         * mark->connector->obj getting NULL.
         */
        if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
                /* old_entry is being shot, let's just lie */
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(old_entry);
                fsnotify_put_mark(&chunk->mark);
                return -ENOENT;
        }

        if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
                                     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                fsnotify_detach_mark(chunk_entry);
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_free_mark(chunk_entry);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        chunk->key = old->key;
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        fsnotify_detach_mark(old_entry);
        mutex_unlock(&audit_tree_group->mark_mutex);
        fsnotify_free_mark(old_entry);
        fsnotify_put_mark(chunk_entry); /* drop initial reference */
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find_mark() */
        return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
        struct audit_buffer *ab;

        if (!audit_enabled)
                return;
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;
        audit_log_format(ab, "op=remove_rule");
        audit_log_format(ab, " dir=");
        audit_log_untrustedstring(ab, rule->tree->pathname);
        audit_log_key(ab, rule->filterkey);
        audit_log_format(ab, " list=%d res=1", rule->listnr);
        audit_log_end(ab);
}

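/*
 * Detach and free every rule that refers to @tree.  Runs under
 * audit_filter_mutex; the rule entries themselves are freed via RCU.
 */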
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        audit_tree_log_remove_rule(rule);
                        if (entry->rule.exe)
                                audit_remove_mark(entry->rule.exe);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

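/* iterate_mounts() callback: does this mount's root inode map to the key in @arg? */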
static int compare_root(struct vfsmount *mnt, void *arg)
{
        return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
               (unsigned long)arg;
}

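/*
 * Re-check every tree against its on-disk path: each chunk is first
 * marked "will prune", the mark is cleared again if one of the mounts
 * collected under the tree's path still matches it (see compare_root()),
 * and trim_marked() then untags whatever stayed marked.
 */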
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (IS_ERR(root_mnt))
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root,
                                           (void *)(chunk->key),
                                           root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

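/*
 * Sanity-check a prospective tree rule and hang a freshly allocated,
 * not-yet-inserted audit_tree off it; audit_add_tree_rule() wires the
 * tree into tree_list later.
 */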
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

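/* iterate_mounts() callback: tag the mount's root inode with the tree in @arg */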
static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        for (;;) {
                if (list_empty(&prune_list)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                }

                audit_ctl_lock();
                mutex_lock(&audit_filter_mutex);

                while (!list_empty(&prune_list)) {
                        struct audit_tree *victim;

                        victim = list_entry(prune_list.next,
                                        struct audit_tree, list);
                        list_del_init(&victim->list);

                        mutex_unlock(&audit_filter_mutex);

                        prune_one(victim);

                        mutex_lock(&audit_filter_mutex);
                }

                mutex_unlock(&audit_filter_mutex);
                audit_ctl_unlock();
        }
        return 0;
}

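/* Start prune_tree_thread lazily, when the first tree rule is added. */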
static int audit_launch_prune(void)
{
        if (prune_thread)
                return 0;
        prune_thread = kthread_run(prune_tree_thread, NULL,
                                "audit_prune_tree");
        if (IS_ERR(prune_thread)) {
                pr_err("cannot start thread audit_prune_tree");
                prune_thread = NULL;
                return -ENOMEM;
        }
        return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        if (unlikely(!prune_thread)) {
                err = audit_launch_prune();
                if (err)
                        goto Err;
        }

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (IS_ERR(mnt)) {
                err = PTR_ERR(mnt);
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

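/*
 * Tag every tree whose pathname lies under @old with the mounts
 * collected at @new.  The cursor and barrier list entries keep our
 * place on tree_list while audit_filter_mutex is dropped around path
 * lookups and tagging; processed trees are parked between the list
 * head and the barrier and swept again afterwards, either to clear
 * their "will prune" bits or to trim them if tagging failed.
 */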
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (IS_ERR(tagged))
                return PTR_ERR(tagged);

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}


static void audit_schedule_prune(void)
{
        wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        audit_ctl_lock();
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

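/*
 * An inode carrying a chunk is going away: detach every owning tree
 * from the chunk and either schedule the killing of those trees on the
 * prune thread or, if we are inside a syscall, postpone it to the
 * per-task list returned by audit_killed_trees().
 */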
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
        if (need_prune)
                audit_schedule_prune();
}

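/*
 * We never care about the events themselves; the mark exists only so
 * that ->freeing_mark tells us when the inode (and thus the chunk)
 * goes away.
 */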
static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct inode *to_tell,
                                   u32 mask, const void *data, int data_type,
                                   const unsigned char *file_name, u32 cookie,
                                   struct fsnotify_iter_info *iter_info)
{
        return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);

        /*
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
        BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .freeing_mark = audit_tree_freeing_mark,
        .free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);