On Thu, Dec 22, 2016 at 4:15 AM, Jan Kara <jack@suse.cz> wrote:
Audit tree currently uses the inode pointer as a key into the hash
table. Getting that from the notification mark will be somewhat more
difficult with the coming fsnotify changes, and there's no reason we
really have to use the inode pointer. So abstract the computation of
the hash key from the audit chunk and from the inode so that we can
switch to a different key easily later.
CC: Paul Moore <paul@paul-moore.com>
Signed-off-by: Jan Kara <jack@suse.cz>
---
kernel/audit_tree.c | 39 ++++++++++++++++++++++++++++-----------
1 file changed, 28 insertions(+), 11 deletions(-)
I have no objections to this patch in particular, but in patch 8, are
you certain that inode_to_key() and chunk_to_key() will continue to
return the same key value?
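
To make the concern concrete, here is a minimal userspace sketch (not
kernel code; the struct layouts are simplified stand-ins, only the
helper names mirror the patch) of the invariant the lookup path now
depends on: the key computed from the inode on the lookup side must
equal the key computed from the chunk on the insert side, otherwise
audit_tree_lookup() walks the right bucket but never matches the chunk.

#include <assert.h>
#include <stdio.h>

struct inode { int i_dummy; };
struct fsnotify_mark { struct inode *inode; };
struct audit_chunk { struct fsnotify_mark mark; };

static unsigned long inode_to_key(const struct inode *inode)
{
	return (unsigned long)inode;
}

static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
	return (unsigned long)chunk->mark.inode;
}

int main(void)
{
	struct inode ino;
	struct audit_chunk chunk = { .mark = { .inode = &ino } };

	/* the lookup side computes inode_to_key(), the insert side
	 * chunk_to_key(); for the same object the two must agree */
	assert(inode_to_key(&ino) == chunk_to_key(&chunk));
	printf("keys agree: %#lx\n", inode_to_key(&ino));
	return 0;
}

With this patch both helpers reduce to the same inode pointer, so the
assertion holds trivially; if a later patch changes only one of the
two derivations, that assertion is exactly what breaks.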
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 156b6a93f4fc..f0859828de09 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -163,33 +163,48 @@ enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
-static inline struct list_head *chunk_hash(const struct inode *inode)
+/* Function to return search key in our hash from inode. */
+static unsigned long inode_to_key(const struct inode *inode)
{
- unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
+ return (unsigned long)inode;
+}
+
+/*
+ * Function to return search key in our hash from chunk. Key 0 is special and
+ * should never be present in the hash.
+ */
+static unsigned long chunk_to_key(struct audit_chunk *chunk)
+{
+ return (unsigned long)chunk->mark.inode;
+}
+
+static inline struct list_head *chunk_hash(unsigned long key)
+{
+ unsigned long n = key / L1_CACHE_BYTES;
return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
- struct fsnotify_mark *entry = &chunk->mark;
+ unsigned long key = chunk_to_key(chunk);
struct list_head *list;
- if (!entry->inode)
+ if (!key)
return;
- list = chunk_hash(entry->inode);
+ list = chunk_hash(key);
list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
- struct list_head *list = chunk_hash(inode);
+ unsigned long key = inode_to_key(inode);
+ struct list_head *list = chunk_hash(key);
struct audit_chunk *p;
list_for_each_entry_rcu(p, list, hash) {
- /* mark.inode may have gone NULL, but who cares? */
- if (p->mark.inode == inode) {
+ if (chunk_to_key(p) == key) {
atomic_long_inc(&p->refs);
return p;
}
@@ -585,7 +600,8 @@ int audit_remove_tree_rule(struct audit_krule *rule)
static int compare_root(struct vfsmount *mnt, void *arg)
{
- return d_backing_inode(mnt->mnt_root) == arg;
+ return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
+ (unsigned long)arg;
}
void audit_trim_trees(void)
@@ -620,9 +636,10 @@ void audit_trim_trees(void)
list_for_each_entry(node, &tree->chunks, list) {
struct audit_chunk *chunk = find_chunk(node);
/* this could be NULL if the watch is dying else where... */
- struct inode *inode = chunk->mark.inode;
node->index |= 1U<<31;
- if (iterate_mounts(compare_root, inode, root_mnt))
+ if (iterate_mounts(compare_root,
+ (void *)chunk_to_key(chunk),
+ root_mnt))
node->index &= ~(1U<<31);
}
spin_unlock(&hash_lock);
--
2.10.2
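
For anyone following along, a similarly hedged userspace sketch of the
bucket computation that chunk_hash() above performs on the new key.
The function name chunk_hash_bucket() is mine (the kernel helper
returns a list head, not an index), HASH_SIZE matches the file, and
the L1_CACHE_BYTES value here is illustrative and arch-dependent; the
division presumably discards low-order bits that vary little for
aligned inode allocations.

#include <stdio.h>

enum { HASH_SIZE = 128 };	/* matches audit_tree.c */
#define L1_CACHE_BYTES 64	/* illustrative; arch-dependent in the kernel */

static unsigned int chunk_hash_bucket(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;

	return n % HASH_SIZE;
}

int main(void)
{
	/* two pretend inode addresses one cache line apart land in
	 * adjacent buckets rather than colliding on their low bits */
	unsigned long a = 0x12345600UL;
	unsigned long b = a + L1_CACHE_BYTES;

	printf("bucket(a)=%u bucket(b)=%u\n",
	       chunk_hash_bucket(a), chunk_hash_bucket(b));
	return 0;
}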
--
paul moore
www.paul-moore.com