Variables pointing to fsnotify_mark are sometimes called 'entry' and
sometimes 'mark'. Use 'mark' in all places.
Signed-off-by: Jan Kara <jack@suse.cz>
---
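Note for reviewers: the rename is purely mechanical, but it reads more naturally once you recall how the audit mark wraps the fsnotify mark. Below is a minimal sketch of that relationship, assuming the simplified layout shown here (the in-tree struct audit_tree_mark may carry additional fields); only the container_of() pattern itself is taken from the patch context.

/*
 * Sketch only: the audit mark embeds a struct fsnotify_mark, and
 * audit_mark() maps a pointer to that embedded member back to the
 * containing audit_tree_mark. Calling the pointer 'mark' everywhere
 * therefore matches what the variable actually points to.
 */
struct audit_tree_mark {
	struct fsnotify_mark mark;	/* embedded fsnotify mark */
	struct audit_chunk *chunk;	/* chunk currently tied to this mark */
};

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}
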
kernel/audit_tree.c | 95 +++++++++++++++++++++++++++--------------------------
1 file changed, 48 insertions(+), 47 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index ef109000ed01..9c53f7c37bdf 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -158,9 +158,9 @@ static void audit_mark_put_chunk(struct audit_chunk *chunk)
call_rcu(&chunk->head, __put_chunk);
}
-static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *entry)
+static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
- return container_of(entry, struct audit_tree_mark, mark);
+ return container_of(mark, struct audit_tree_mark, mark);
}
static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
@@ -168,9 +168,9 @@ static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
return audit_mark(mark)->chunk;
}
-static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
+static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
- kmem_cache_free(audit_tree_mark_cachep, audit_mark(entry));
+ kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}
static struct fsnotify_mark *alloc_mark(void)
@@ -224,7 +224,7 @@ static inline struct list_head *chunk_hash(unsigned long key)
return chunk_hash_heads + n % HASH_SIZE;
}
-/* hash_lock & entry->group->mark_mutex is held by caller */
+/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
struct list_head *list;
@@ -278,16 +278,16 @@ static struct audit_chunk *find_chunk(struct node *p)
return container_of(p, struct audit_chunk, owners[0]);
}
-static void replace_mark_chunk(struct fsnotify_mark *entry,
+static void replace_mark_chunk(struct fsnotify_mark *mark,
struct audit_chunk *chunk)
{
struct audit_chunk *old;
assert_spin_locked(&hash_lock);
- old = mark_chunk(entry);
- audit_mark(entry)->chunk = chunk;
+ old = mark_chunk(mark);
+ audit_mark(mark)->chunk = chunk;
if (chunk)
- chunk->mark = entry;
+ chunk->mark = mark;
if (old)
old->mark = NULL;
}
@@ -328,30 +328,30 @@ static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old,
static void untag_chunk(struct node *p)
{
struct audit_chunk *chunk = find_chunk(p);
- struct fsnotify_mark *entry = chunk->mark;
+ struct fsnotify_mark *mark = chunk->mark;
struct audit_chunk *new = NULL;
struct audit_tree *owner;
int size = chunk->count - 1;
/* Racing with audit_tree_freeing_mark()? */
- if (!entry)
+ if (!mark)
return;
- fsnotify_get_mark(entry);
+ fsnotify_get_mark(mark);
spin_unlock(&hash_lock);
if (size)
new = alloc_chunk(size);
- mutex_lock(&entry->group->mark_mutex);
+ mutex_lock(&mark->group->mark_mutex);
/*
* mark_mutex stabilizes chunk attached to the mark so we can check
* whether it didn't change while we've dropped hash_lock.
*/
- if (!(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
- mark_chunk(entry) != chunk) {
- mutex_unlock(&entry->group->mark_mutex);
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
+ mark_chunk(mark) != chunk) {
+ mutex_unlock(&mark->group->mark_mutex);
kfree(new);
goto out;
}
@@ -365,12 +365,12 @@ static void untag_chunk(struct node *p)
owner->root = NULL;
list_del_init(&p->list);
list_del_rcu(&chunk->hash);
- replace_mark_chunk(entry, NULL);
+ replace_mark_chunk(mark, NULL);
spin_unlock(&hash_lock);
- fsnotify_detach_mark(entry);
- mutex_unlock(&entry->group->mark_mutex);
+ fsnotify_detach_mark(mark);
+ mutex_unlock(&mark->group->mark_mutex);
audit_mark_put_chunk(chunk);
- fsnotify_free_mark(entry);
+ fsnotify_free_mark(mark);
goto out;
}
@@ -389,7 +389,7 @@ static void untag_chunk(struct node *p)
*/
replace_chunk(new, chunk, p);
spin_unlock(&hash_lock);
- mutex_unlock(&entry->group->mark_mutex);
+ mutex_unlock(&mark->group->mark_mutex);
audit_mark_put_chunk(chunk);
goto out;
@@ -404,16 +404,16 @@ static void untag_chunk(struct node *p)
p->owner = NULL;
put_tree(owner);
spin_unlock(&hash_lock);
- mutex_unlock(&entry->group->mark_mutex);
+ mutex_unlock(&mark->group->mark_mutex);
out:
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(mark);
spin_lock(&hash_lock);
}
/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
- struct fsnotify_mark *entry;
+ struct fsnotify_mark *mark;
struct audit_chunk *chunk = alloc_chunk(1);
if (!chunk) {
@@ -421,16 +421,16 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
return -ENOMEM;
}
- entry = alloc_mark();
- if (!entry) {
+ mark = alloc_mark();
+ if (!mark) {
mutex_unlock(&audit_tree_group->mark_mutex);
kfree(chunk);
return -ENOMEM;
}
- if (fsnotify_add_inode_mark_locked(entry, inode, 0)) {
+ if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
mutex_unlock(&audit_tree_group->mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(mark);
kfree(chunk);
return -ENOSPC;
}
@@ -438,14 +438,14 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
spin_lock(&hash_lock);
if (tree->goner) {
spin_unlock(&hash_lock);
- fsnotify_detach_mark(entry);
+ fsnotify_detach_mark(mark);
mutex_unlock(&audit_tree_group->mark_mutex);
- fsnotify_free_mark(entry);
- fsnotify_put_mark(entry);
+ fsnotify_free_mark(mark);
+ fsnotify_put_mark(mark);
kfree(chunk);
return 0;
}
- replace_mark_chunk(entry, chunk);
+ replace_mark_chunk(mark, chunk);
chunk->owners[0].index = (1U << 31);
chunk->owners[0].owner = tree;
get_tree(tree);
@@ -467,21 +467,21 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
* we get notification through ->freeing_mark callback and cleanup
* chunk pointing to this mark.
*/
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(mark);
return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
- struct fsnotify_mark *entry;
+ struct fsnotify_mark *mark;
struct audit_chunk *chunk, *old;
struct node *p;
int n;
mutex_lock(&audit_tree_group->mark_mutex);
- entry = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
- if (!entry)
+ mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
+ if (!mark)
return create_chunk(inode, tree);
/*
@@ -491,12 +491,12 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
*/
/* are we already there? */
spin_lock(&hash_lock);
- old = mark_chunk(entry);
+ old = mark_chunk(mark);
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
mutex_unlock(&audit_tree_group->mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(mark);
return 0;
}
}
@@ -505,7 +505,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk = alloc_chunk(old->count + 1);
if (!chunk) {
mutex_unlock(&audit_tree_group->mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(mark);
return -ENOMEM;
}
@@ -513,7 +513,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
if (tree->goner) {
spin_unlock(&hash_lock);
mutex_unlock(&audit_tree_group->mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(mark);
kfree(chunk);
return 0;
}
@@ -533,7 +533,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
replace_chunk(chunk, old, NULL);
spin_unlock(&hash_lock);
mutex_unlock(&audit_tree_group->mark_mutex);
- fsnotify_put_mark(entry); /* pair to fsnotify_find mark_entry */
+ fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
audit_mark_put_chunk(old);
return 0;
@@ -1040,16 +1040,17 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
return 0;
}
-static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
+static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group)
{
struct audit_chunk *chunk;
- mutex_lock(&entry->group->mark_mutex);
+ mutex_lock(&mark->group->mark_mutex);
spin_lock(&hash_lock);
- chunk = mark_chunk(entry);
- replace_mark_chunk(entry, NULL);
+ chunk = mark_chunk(mark);
+ replace_mark_chunk(mark, NULL);
spin_unlock(&hash_lock);
- mutex_unlock(&entry->group->mark_mutex);
+ mutex_unlock(&mark->group->mark_mutex);
if (chunk) {
evict_chunk(chunk);
audit_mark_put_chunk(chunk);
@@ -1059,7 +1060,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
* We are guaranteed to have at least one reference to the mark from
* either the inode or the caller of fsnotify_destroy_mark().
*/
- BUG_ON(refcount_read(&entry->refcnt) < 1);
+ BUG_ON(refcount_read(&mark->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
--
2.16.4