#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we call
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                hash_lock
 * tree.rules anchors rule.rlist                          audit_filter_mutex
 * chunk.trees anchors tree.same_root                     hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                       RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by the embedded fsnotify_mark + .refs (a non-zero
 * refcount of the watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic, and
 * being able to tell tentative taggings apart makes a difference.
 */

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

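/*
 * Allocate a chunk with room for @count owners.  owners[] is a flexible
 * array sized here; each slot's index starts out as its position, and
 * both the chunk (.refs) and the embedded fsnotify mark begin with one
 * reference held by the caller.
 */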
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
        return (unsigned long)inode;
}

/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
        /*
         * We have a reference to the mark so it should be attached to a
         * connector.
         */
        if (WARN_ON_ONCE(!chunk->mark.connector))
                return 0;
        return (unsigned long)chunk->mark.connector->inode;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
        unsigned long n = key / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock and entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
        unsigned long key = chunk_to_key(chunk);
        struct list_head *list;

        if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
                return;
        list = chunk_hash(key);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        unsigned long key = inode_to_key(inode);
        struct list_head *list = chunk_hash(key);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                if (chunk_to_key(p) == key) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return true;
        return false;
}

/* tagging and untagging inodes with trees */

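/*
 * Map a node back to the chunk embedding it.  The low bits of node.index
 * record the slot number within owners[], so stepping back that many
 * entries lands on owners[0], from which container_of() recovers the
 * chunk itself.
 */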
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

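/*
 * Remove @p's owner from its chunk.  owners[] is never resized in place:
 * we build a replacement chunk one slot smaller (or tear the chunk down
 * entirely if this was the last owner), swap it into the hash and destroy
 * the old mark.  Called with hash_lock held; drops and retakes it around
 * the blocking fsnotify work.
 */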
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        mutex_lock(&entry->group->mark_mutex);
        spin_lock(&entry->lock);
        /*
         * mark_mutex protects mark from getting detached and thus also from
         * mark->connector->inode getting NULL.
         */
        if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
                spin_unlock(&entry->lock);
                mutex_unlock(&entry->group->mark_mutex);
                if (new)
                        free_chunk(new);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                mutex_unlock(&entry->group->mark_mutex);
                fsnotify_destroy_mark(entry, audit_tree_group);
                goto out;
        }

        if (!new)
                goto Fallback;

        if (fsnotify_add_mark_locked(&new->mark, entry->group,
                                     entry->connector->inode, NULL, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        mutex_unlock(&entry->group->mark_mutex);
        fsnotify_destroy_mark(entry, audit_tree_group);
        fsnotify_put_mark(&new->mark); /* drop initial reference */
        goto out;

Fallback:
        /* do the best we can */
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        mutex_unlock(&entry->group->mark_mutex);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

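/*
 * Attach the first chunk for @inode on behalf of @tree: allocate a
 * single-owner chunk, add its fsnotify mark to the inode, then wire it
 * into the tree and the hash under the usual locks.  If the tree went
 * goner while we were blocked, undo the work and report success anyway.
 */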
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry, audit_tree_group);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_put_mark(entry); /* drop initial reference */
        return 0;
}

/* the first tagged inode becomes root of tree */
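/*
 * If the inode already carries a chunk, grow it by one owner: allocate
 * a copy with count + 1 slots, transplant the old owners into it, mark
 * the old chunk dead and replace it in the hash under RCU.  Otherwise
 * this degenerates to create_chunk().
 */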
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        mutex_lock(&old_entry->group->mark_mutex);
        spin_lock(&old_entry->lock);
        /*
         * mark_mutex protects mark from getting detached and thus also from
         * mark->connector->inode getting NULL.
         */
        if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                mutex_unlock(&old_entry->group->mark_mutex);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
                                     old_entry->connector->inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                mutex_unlock(&old_entry->group->mark_mutex);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /*
         * even though we hold old_entry->lock, this is safe since
         * chunk_entry->lock could NEVER have been grabbed before
         */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);
                mutex_unlock(&old_entry->group->mark_mutex);

                fsnotify_destroy_mark(chunk_entry, audit_tree_group);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        mutex_unlock(&old_entry->group->mark_mutex);
        fsnotify_destroy_mark(old_entry, audit_tree_group);
        fsnotify_put_mark(chunk_entry); /* drop initial reference */
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
        return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
        struct audit_buffer *ab;

        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;
        audit_log_format(ab, "op=remove_rule");
        audit_log_format(ab, " dir=");
        audit_log_untrustedstring(ab, rule->tree->pathname);
        audit_log_key(ab, rule->filterkey);
        audit_log_format(ab, " list=%d res=1", rule->listnr);
        audit_log_end(ab);
}

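/*
 * Detach every rule hanging off @tree, logging a configuration change
 * for each fully initialized one and freeing the entries via RCU.
 * Called with audit_filter_mutex held.
 */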
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        audit_tree_log_remove_rule(rule);
                        if (entry->rule.exe)
                                audit_remove_mark(entry->rule.exe);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */
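/*
 * Two passes under hash_lock: first move every marked node (MSB of
 * index set) to the front of tree->chunks, then untag from the front
 * until the first unmarked node.  If that leaves the tree without a
 * root chunk and it is not already a goner, kill its rules and prune it.
 */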
static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
        return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
               (unsigned long)arg;
}

void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (IS_ERR(root_mnt))
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root,
                                           (void *)chunk_to_key(chunk),
                                           root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

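/*
 * Turn a rule's pathname into a freshly allocated audit_tree.  Only an
 * absolute path on the exit filter list with an equality comparator is
 * accepted, and the rule must not already carry an inode filter, watch
 * or tree.
 */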
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        for (;;) {
                if (list_empty(&prune_list)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                }

                mutex_lock(&audit_cmd_mutex);
                mutex_lock(&audit_filter_mutex);

                while (!list_empty(&prune_list)) {
                        struct audit_tree *victim;

                        victim = list_entry(prune_list.next,
                                        struct audit_tree, list);
                        list_del_init(&victim->list);

                        mutex_unlock(&audit_filter_mutex);

                        prune_one(victim);

                        mutex_lock(&audit_filter_mutex);
                }

                mutex_unlock(&audit_filter_mutex);
                mutex_unlock(&audit_cmd_mutex);
        }
        return 0;
}

static int audit_launch_prune(void)
{
        if (prune_thread)
                return 0;
        prune_thread = kthread_run(prune_tree_thread, NULL,
                                "audit_prune_tree");
        if (IS_ERR(prune_thread)) {
                pr_err("cannot start thread audit_prune_tree\n");
                prune_thread = NULL;
                return -ENOMEM;
        }
        return 0;
}

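/*
 * Attach a rule to a tree.  If a tree with the same pathname is already
 * on tree_list the rule simply joins it; otherwise the seed tree is
 * inserted and every mount under its path gets tagged.  audit_filter_mutex
 * is dropped around the path walk, so the rule may have been removed by
 * the time we retake it - hence the -ENOENT check at the end.
 */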
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        if (unlikely(!prune_thread)) {
                err = audit_launch_prune();
                if (err)
                        goto Err;
        }

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (IS_ERR(mnt)) {
                err = PTR_ERR(mnt);
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

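/*
 * For every tree whose path covers @old, tag the mounts collected at
 * @new as well.  tree_list is walked with a cursor/barrier pair because
 * audit_filter_mutex must be dropped for each path lookup; processed
 * trees are parked behind the barrier, and the second loop either
 * confirms their fresh tags or trims them if any tagging failed.
 */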
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (IS_ERR(tagged))
                return PTR_ERR(tagged);

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}

static void audit_schedule_prune(void)
{
        wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

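/*
 * The watched inode is going away: take the chunk out of the hash and
 * detach all owners.  Any tree rooted at this chunk becomes a goner and
 * is queued for killing - on the syscall's postponed list if there is
 * one, otherwise on prune_list for the prune thread.
 */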
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
        if (need_prune)
                audit_schedule_prune();
}

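/*
 * The fsnotify events themselves carry nothing we care about; all the
 * work happens in freeing_mark, when an inode carrying a chunk goes away.
 */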
static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct inode *to_tell,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmount_mark,
                                   u32 mask, const void *data, int data_type,
                                   const unsigned char *file_name, u32 cookie)
{
        return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);

        /*
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
        BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);