#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;

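/* Allocate an audit_tree for path @s; the caller holds the initial reference. */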
75static struct audit_tree *alloc_tree(const char *s)
76{
77 struct audit_tree *tree;
78
79 tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
80 if (tree) {
Elena Reshetova9d2378f2017-05-02 10:16:04 -040081 refcount_set(&tree->count, 1);
Al Viro74c3cbe2007-07-22 08:04:18 -040082 tree->goner = 0;
83 INIT_LIST_HEAD(&tree->chunks);
84 INIT_LIST_HEAD(&tree->rules);
85 INIT_LIST_HEAD(&tree->list);
86 INIT_LIST_HEAD(&tree->same_root);
87 tree->root = NULL;
88 strcpy(tree->pathname, s);
89 }
90 return tree;
91}
92
93static inline void get_tree(struct audit_tree *tree)
94{
Elena Reshetova9d2378f2017-05-02 10:16:04 -040095 refcount_inc(&tree->count);
Al Viro74c3cbe2007-07-22 08:04:18 -040096}
97
Al Viro74c3cbe2007-07-22 08:04:18 -040098static inline void put_tree(struct audit_tree *tree)
99{
Elena Reshetova9d2378f2017-05-02 10:16:04 -0400100 if (refcount_dec_and_test(&tree->count))
Lai Jiangshan3b097c42011-03-15 18:03:53 +0800101 kfree_rcu(tree, head);
Al Viro74c3cbe2007-07-22 08:04:18 -0400102}
103
104/* to avoid bringing the entire thing in audit.h */
105const char *audit_tree_path(struct audit_tree *tree)
106{
107 return tree->pathname;
108}
109
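/* Drop the tree references held by a chunk's owner slots, then free the chunk. */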
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

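/*
 * Allocate a chunk with @count owner slots, an initial reference and an
 * initialized (but not yet attached) fsnotify mark.
 */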
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

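/* Chunks are hashed by the address of the inode they are attached to. */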
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

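/*
 * Remove @p's owner from the chunk it belongs to, replacing the chunk with a
 * copy that has one slot fewer (or destroying it outright when @p was the
 * last owner).  Called with hash_lock held; drops it while working and
 * re-takes it before returning.
 */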
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
				     NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

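/* Attach a fresh single-owner chunk to an inode that carries no audit mark yet. */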
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
				     old_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}

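/* Emit a CONFIG_CHANGE record for a rule that is removed along with its tree. */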
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

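/*
 * Detach every rule hanging off @tree and schedule the corresponding entries
 * for freeing.  Called with audit_filter_mutex held.
 */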
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}

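/*
 * Re-resolve every tree's path and drop the chunks whose inodes no longer sit
 * on a mount under that path.
 */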
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

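/*
 * Validate a tree rule (absolute path, exit list, equality comparison) and
 * attach a freshly allocated tree to it.
 */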
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

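/*
 * For every tree whose path @old falls under, also tag the mounts collected
 * at @new, so the tree covers both locations.
 */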
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}


static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

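/*
 * The inode backing this chunk is going away: detach the chunk and either
 * queue the owning trees for pruning or hand them to the caller's postponed
 * list.
 */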
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

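/* The events themselves are ignored; only mark destruction matters to us. */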
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);