This patch includes several fixes and some new features for the inotify kernel
API.
Fixes:
- always call event handler for IN_IGNORED last, in case caller
frees watch memory
- don't leak memory with IN_ONESHOT events
- don't leak memory in inotify_destroy()
- fix memory management: make {get,put}_inotify_watch public
- have caller register a destroy_watch op to free watch memory on last
put_inotify_watch
Features:
- make inotify_remove_watch_locked() public for use in event callback
- add inotify_init_watch() so caller can grab a reference before calling
inotify_add_watch(), after which the watch is subject to auto-removal
- in inotify_find_watch(), grab a ref for caller if watch is found
Other:
- update inotify documentation
Signed-off-by: Amy Griffis <amy.griffis@hp.com>
---
Documentation/filesystems/inotify.txt | 130 ++++++++++++++++++++++++++++++++--
fs/inotify.c | 124 ++++++++++++++++++--------------
fs/inotify_user.c | 34 +++++---
include/linux/inotify.h | 42 ++++++++--
4 files changed, 251 insertions(+), 79 deletions(-)
diff --git a/Documentation/filesystems/inotify.txt
b/Documentation/filesystems/inotify.txt
index 6d50190..59a919f 100644
--- a/Documentation/filesystems/inotify.txt
+++ b/Documentation/filesystems/inotify.txt
@@ -69,17 +69,135 @@ Prototypes:
int inotify_rm_watch (int fd, __u32 mask);
-(iii) Internal Kernel Implementation
+(iii) Kernel Interface
-Each inotify instance is associated with an inotify_device structure.
+Inotify's kernel API consists of a set of functions for managing watches and an
+event callback.
+
+To use the kernel API, you must first initialize an inotify instance with a set
+of inotify_operations. You are given an opaque inotify_handle, which you use
+for any further calls to inotify.
+
+	struct inotify_handle *ih = inotify_init(&my_inotify_ops);
+
+You must provide a function for processing events and a function for destroying
+the inotify watch.
+
+ void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
+ u32 cookie, const char *name, struct inode *inode)
+
+ watch - the pointer to the inotify_watch that triggered this call
+ wd - the watch descriptor
+ mask - describes the event that occurred
+ cookie - an identifier for synchronizing events
+ name - the dentry name for affected files in a directory-based event
+ inode - the affected inode in a directory-based event
+
+ void destroy_watch(struct inotify_watch *watch)
+
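+For example (a sketch only, using hypothetical names), you might embed the
+inotify_watch in a structure of your own, free that structure from your
+destroy_watch op, and wire both functions into a struct inotify_operations.
+Here my_handle_event is your event handler, sketched later in this section:
+
+	struct my_watch {
+		struct inotify_watch iwatch;	/* embedded inotify watch */
+		/* ... your own per-watch data ... */
+	};
+
+	static void my_destroy_watch(struct inotify_watch *watch)
+	{
+		/* recover the enclosing structure and free it */
+		kfree(container_of(watch, struct my_watch, iwatch));
+	}
+
+	static const struct inotify_operations my_inotify_ops = {
+		.handle_event	= my_handle_event,
+		.destroy_watch	= my_destroy_watch,
+	};
+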
+You may add watches by providing a pre-allocated and initialized inotify_watch
+structure and specifying the inode to watch along with an inotify event mask.
+You must pin the inode during the call. You will likely wish to embed the
+inotify_watch structure in a structure of your own which contains other
+information about the watch. Once you add an inotify watch, it is immediately
+subject to removal depending on filesystem events. You must grab a reference if
+you depend on the watch hanging around after the call.
+
+ inotify_init_watch(&my_watch->iwatch);
+ inotify_get_watch(&my_watch->iwatch); // optional
+ s32 wd = inotify_add_watch(ih, &my_watch->iwatch, inode, mask);
+ inotify_put_watch(&my_watch->iwatch); // optional
+
+You may use the watch descriptor (wd) or the address of the inotify_watch for
+other inotify operations. You must not directly read or manipulate data in the
+inotify_watch. Additionally, you must not call inotify_add_watch() more than
+once for a given inotify_watch structure, unless you have first called either
+inotify_rm_watch() or inotify_rm_wd().
+
+To determine if you have already registered a watch for a given inode, you may
+call inotify_find_watch(), which gives you both the wd and the watch pointer for
+the inotify_watch, or an error if the watch does not exist.
+
+ wd = inotify_find_watch(ih, inode, &watchp);
+
+You may use container_of() on the watch pointer to access your own data
+associated with a given watch. When an existing watch is found,
+inotify_find_watch() bumps the refcount before releasing its locks. You must
+put that reference with:
+
+ put_inotify_watch(watchp);
+
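+For example, to recover your own structure from the returned pointer, again
+assuming the hypothetical struct my_watch above:
+
+	struct my_watch *mw = container_of(watchp, struct my_watch, iwatch);
+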
+Call inotify_find_update_watch() to update the event mask for an existing watch.
+inotify_find_update_watch() returns the wd of the updated watch, or an error if
+the watch does not exist.
+
+ wd = inotify_find_update_watch(ih, inode, mask);
+
+An existing watch may be removed by calling either inotify_rm_watch() or
+inotify_rm_wd().
+
+ int ret = inotify_rm_watch(ih, &my_watch->iwatch);
+ int ret = inotify_rm_wd(ih, wd);
+
+A watch may be removed while executing your event handler with the following:
+
+ inotify_remove_watch_locked(ih, iwatch);
+
+Call inotify_destroy() to remove all watches from your inotify instance and
+release it. If there are no outstanding references, inotify_destroy() will call
+your destroy_watch op for each watch.
+
+ inotify_destroy(ih);
+
+When inotify removes a watch, it sends an IN_IGNORED event to your callback.
+You may use this event as an indication to drop your final reference to the
+watch, which frees it via your destroy_watch op. Note that
+inotify may remove a watch due to filesystem events, as well as by your request.
+If you use IN_ONESHOT, inotify will remove the watch after the first event, at
+which point you may call the final put_inotify_watch().
+
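+A sketch (hypothetical names as above) of how an event handler might drop its
+reference once the watch has been removed:
+
+	static void my_handle_event(struct inotify_watch *watch, u32 wd,
+				    u32 mask, u32 cookie, const char *name,
+				    struct inode *inode)
+	{
+		/* ... act on the event ... */
+
+		/*
+		 * inotify has removed the watch: drop the final reference,
+		 * which invokes my_destroy_watch().  A caller using
+		 * IN_ONESHOT would instead do the final put after the
+		 * first event.
+		 */
+		if (mask & IN_IGNORED)
+			put_inotify_watch(watch);
+	}
+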
+(iv) Kernel Interface Prototypes
+
+	struct inotify_handle *inotify_init(const struct inotify_operations *ops);
+
+	void inotify_init_watch(struct inotify_watch *watch);
+
+ s32 inotify_add_watch(struct inotify_handle *ih,
+ struct inotify_watch *watch,
+ struct inode *inode, u32 mask);
+
+ s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
+ struct inotify_watch **watchp);
+
+ s32 inotify_find_update_watch(struct inotify_handle *ih,
+ struct inode *inode, u32 mask);
+
+ int inotify_rm_wd(struct inotify_handle *ih, u32 wd);
+
+ int inotify_rm_watch(struct inotify_handle *ih,
+ struct inotify_watch *watch);
+
+ void inotify_remove_watch_locked(struct inotify_handle *ih,
+ struct inotify_watch *watch);
+
+ void inotify_destroy(struct inotify_handle *ih);
+
+ void get_inotify_watch(struct inotify_watch *watch);
+ void put_inotify_watch(struct inotify_watch *watch);
+
+
+(v) Internal Kernel Implementation
+
+Each inotify instance is represented by an inotify_handle structure.
+Inotify's userspace consumers also have an inotify_device which is
+associated with the inotify_handle, and on which events are queued.
Each watch is associated with an inotify_watch structure. Watches are chained
-off of each associated device and each associated inode.
+off of each associated inotify_handle and each associated inode.
-See fs/inotify.c for the locking and lifetime rules.
+See fs/inotify.c and fs/inotify_user.c for the locking and lifetime rules.
-(iv) Rationale
+(vi) Rationale
Q: What is the design decision behind not tying the watch to the open fd of
the watched object?
@@ -145,7 +263,7 @@ A: The poor user-space interface is the
file descriptor-based one that allows basic file I/O and poll/select.
Obtaining the fd and managing the watches could have been done either via a
device file or a family of new system calls. We decided to implement a
- family of system calls because that is the preffered approach for new kernel
+ family of system calls because that is the preferred approach for new kernel
interfaces. The only real difference was whether we wanted to use open(2)
and ioctl(2) or a couple of new system calls. System calls beat ioctls.
diff --git a/fs/inotify.c b/fs/inotify.c
index 7e75731..16d65ba 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -5,7 +5,10 @@
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
+ * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
+ *
* Copyright (C) 2005 John McCutchan
+ * Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -41,16 +44,9 @@ static atomic_t inotify_cookie;
* inotify_handle->mutex (protects inotify_handle and watches->h_list)
*
* The inode->inotify_mutex and inotify_handle->mutex and held during execution
- * of a caller's event callback. Thus, the caller must not hold any locks
- * taking during callback processing while calling any of the published inotify
- * routines, i.e.
- * inotify_init
- * inotify_destroy
- * inotify_find_watch
- * inotify_find_update_watch
- * inotify_add_watch
- * inotify_rm_wd
- * inotify_rm_watch
+ * of a caller's event handler. Thus, the caller must not hold any locks
+ * taken in their event handler while calling any of the published inotify
+ * interfaces.
*/
/*
@@ -61,9 +57,12 @@ static atomic_t inotify_cookie;
* Additional references can bump the count via get_inotify_handle() and drop
* the count via put_inotify_handle().
*
- * inotify_watch: Lifetime is from inotify_add_watch() to
- * remove_watch_no_event(). Additional references can bump the count via
- * get_inotify_watch() and drop the count via put_inotify_watch().
+ * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
+ * to remove_watch_no_event(). Additional references can bump the count via
+ * get_inotify_watch() and drop the count via put_inotify_watch(). The caller
+ * is responsible for the final put after receiving IN_IGNORED, or when using
+ * IN_ONESHOT after receiving the first event. Inotify does the final put when
+ * the caller calls inotify_destroy().
*
* inode: Pinned so long as the inode is associated with a watch, from
* inotify_add_watch() to the final put_inotify_watch().
@@ -80,8 +79,7 @@ struct inotify_handle {
struct list_head watches; /* list of watches */
atomic_t count; /* reference count */
u32 last_wd; /* the last wd allocated */
- void (*callback)(struct inotify_watch *, u32, u32, u32, const char *,
- struct inode *); /* event callback */
+ const struct inotify_operations *in_ops; /* inotify caller operations */
};
static inline void get_inotify_handle(struct inotify_handle *ih)
@@ -97,23 +95,33 @@ static inline void put_inotify_handle(st
}
}
-static inline void get_inotify_watch(struct inotify_watch *watch)
+/**
+ * get_inotify_watch - grab a reference to an inotify_watch
+ * @watch: watch to grab
+ */
+void get_inotify_watch(struct inotify_watch *watch)
{
atomic_inc(&watch->count);
}
+EXPORT_SYMBOL_GPL(get_inotify_watch);
-/*
+/**
* put_inotify_watch - decrements the ref count on a given watch. cleans up
* watch references if the count reaches zero. inotify_watch is freed by
- * inotify callers.
+ * inotify callers via the destroy_watch() op.
+ * @watch: watch to release
*/
-static inline void put_inotify_watch(struct inotify_watch *watch)
+void put_inotify_watch(struct inotify_watch *watch)
{
if (atomic_dec_and_test(&watch->count)) {
- put_inotify_handle(watch->ih);
+ struct inotify_handle *ih = watch->ih;
+
iput(watch->inode);
+ ih->in_ops->destroy_watch(watch);
+ put_inotify_handle(ih);
}
}
+EXPORT_SYMBOL_GPL(put_inotify_watch);
/*
* inotify_handle_get_wd - returns the next WD for use by the given handle
@@ -199,10 +207,9 @@ static struct inotify_watch *inode_find_
}
/*
- * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
+ * remove_watch_no_event - remove watch without the IN_IGNORED event.
*
- * Callers must hold both inode->inotify_mutex and ih->mutex. We may drop a
- * reference to the inode before returning.
+ * Callers must hold both inode->inotify_mutex and ih->mutex.
*/
static void remove_watch_no_event(struct inotify_watch *watch,
struct inotify_handle *ih)
@@ -214,20 +221,24 @@ static void remove_watch_no_event(struct
set_dentry_child_flags(watch->inode, 0);
idr_remove(&ih->idr, watch->wd);
- put_inotify_watch(watch); /* put matching get in inotify_add_watch() */
}
-/*
- * remove_watch - Remove a watch from both the handle and the inode. Sends
- * the IN_IGNORED event signifying that the inode is no longer watched.
+/**
+ * inotify_remove_watch_locked - Remove a watch from both the handle and the
+ * inode. Sends the IN_IGNORED event signifying that the inode is no longer
+ * watched. May be called from a caller's event handler.
+ * @ih: inotify handle associated with watch
+ * @watch: watch to remove
*
* Callers must hold both inode->inotify_mutex and ih->mutex.
*/
-static void remove_watch(struct inotify_watch *watch, struct inotify_handle *ih)
+void inotify_remove_watch_locked(struct inotify_handle *ih,
+ struct inotify_watch *watch)
{
- ih->callback(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
remove_watch_no_event(watch, ih);
+ ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
+EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
/* Kernel API for producing events */
@@ -284,11 +295,11 @@ void inotify_inode_queue_event(struct in
u32 watch_mask = watch->mask;
if (watch_mask & mask) {
struct inotify_handle *ih= watch->ih;
- ih->callback(watch, watch->wd, mask, cookie, name,
- a_inode);
mutex_lock(&ih->mutex);
if (watch_mask & IN_ONESHOT)
remove_watch_no_event(watch, ih);
+ ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
+ name, a_inode);
mutex_unlock(&ih->mutex);
}
}
@@ -372,7 +383,7 @@ void inotify_unmount_inodes(struct list_
need_iput_tmp = need_iput;
need_iput = NULL;
- /* In case the remove_watch() drops a reference. */
+ /* In case inotify_remove_watch_locked() drops a reference. */
if (inode != need_iput_tmp)
__iget(inode);
else
@@ -402,10 +413,10 @@ void inotify_unmount_inodes(struct list_
watches = &inode->inotify_watches;
list_for_each_entry_safe(watch, next_w, watches, i_list) {
struct inotify_handle *ih= watch->ih;
- ih->callback(watch, watch->wd, IN_UNMOUNT, 0, NULL,
- inode);
mutex_lock(&ih->mutex);
- remove_watch(watch, ih);
+ ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
+ NULL, inode);
+ inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
}
mutex_unlock(&inode->inotify_mutex);
@@ -428,7 +439,7 @@ void inotify_inode_is_dead(struct inode
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
struct inotify_handle *ih = watch->ih;
mutex_lock(&ih->mutex);
- remove_watch(watch, ih);
+ inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
}
mutex_unlock(&inode->inotify_mutex);
@@ -439,11 +450,9 @@ EXPORT_SYMBOL_GPL(inotify_inode_is_dead)
/**
* inotify_init - allocate and initialize an inotify instance
- * @cb: event callback function
+ * @ops: caller's inotify operations
*/
-struct inotify_handle *inotify_init(void (*cb)(struct inotify_watch *, u32,
- u32, u32, const char *,
- struct inode *))
+struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
struct inotify_handle *ih;
@@ -455,7 +464,7 @@ struct inotify_handle *inotify_init(void
INIT_LIST_HEAD(&ih->watches);
mutex_init(&ih->mutex);
ih->last_wd = 0;
- ih->callback = cb;
+ ih->in_ops = ops;
atomic_set(&ih->count, 0);
get_inotify_handle(ih);
@@ -464,6 +473,19 @@ struct inotify_handle *inotify_init(void
EXPORT_SYMBOL_GPL(inotify_init);
/**
+ * inotify_init_watch - initialize an inotify watch
+ * @watch: watch to initialize
+ */
+void inotify_init_watch(struct inotify_watch *watch)
+{
+ INIT_LIST_HEAD(&watch->h_list);
+ INIT_LIST_HEAD(&watch->i_list);
+ atomic_set(&watch->count, 0);
+ get_inotify_watch(watch); /* initial get */
+}
+EXPORT_SYMBOL_GPL(inotify_init_watch);
+
+/**
* inotify_destroy - clean up and destroy an inotify instance
* @ih: inotify handle
*/
@@ -495,8 +517,10 @@ void inotify_destroy(struct inotify_hand
mutex_lock(&ih->mutex);
/* make sure we didn't race with another list removal */
- if (likely(idr_find(&ih->idr, watch->wd)))
+ if (likely(idr_find(&ih->idr, watch->wd))) {
remove_watch_no_event(watch, ih);
+ put_inotify_watch(watch);
+ }
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
@@ -512,7 +536,7 @@ EXPORT_SYMBOL_GPL(inotify_destroy);
* inotify_find_watch - find an existing watch for an (ih,inode) pair
* @ih: inotify handle
* @inode: inode to watch
- * @watchp: ptr to existing inotify_watch
+ * @watchp: pointer to existing inotify_watch
*
* Caller must pin given inode (via nameidata).
*/
@@ -527,6 +551,7 @@ s32 inotify_find_watch(struct inotify_ha
old = inode_find_handle(inode, ih);
if (unlikely(old)) {
+ get_inotify_watch(old); /* caller must put watch */
*watchp = old;
ret = old->wd;
}
@@ -606,6 +631,7 @@ s32 inotify_add_watch(struct inotify_han
mask &= IN_ALL_EVENTS | IN_ONESHOT;
if (unlikely(!mask))
return -EINVAL;
+ watch->mask = mask;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
@@ -614,11 +640,7 @@ s32 inotify_add_watch(struct inotify_han
ret = inotify_handle_get_wd(ih, watch);
if (unlikely(ret))
goto out;
-
- watch->mask = mask;
- atomic_set(&watch->count, 0);
- INIT_LIST_HEAD(&watch->h_list);
- INIT_LIST_HEAD(&watch->i_list);
+ ret = watch->wd;
/* save a reference to handle and bump the count to make it official */
get_inotify_handle(ih);
@@ -630,16 +652,12 @@ s32 inotify_add_watch(struct inotify_han
*/
watch->inode = igrab(inode);
- /* bump our own count, corresponding to our entry in ih->watches */
- get_inotify_watch(watch);
-
if (!inotify_inode_watched(inode))
set_dentry_child_flags(inode, 1);
/* Add the watch to the handle's and the inode's list */
list_add(&watch->h_list, &ih->watches);
list_add(&watch->i_list, &inode->inotify_watches);
- ret = watch->wd;
out:
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
@@ -674,7 +692,7 @@ int inotify_rm_wd(struct inotify_handle
/* make sure that we did not race */
if (likely(idr_find(&ih->idr, wd) == watch))
- remove_watch(watch, ih);
+ inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 1ed17ae..9e9931e 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -1,11 +1,12 @@
/*
- * fs/inotify.c - inode-based file event notifications
+ * fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
+ * Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -60,7 +61,8 @@ int inotify_max_queued_events __read_mos
* via get_inotify_dev() and drop the count via put_inotify_dev().
*
* inotify_user_watch: Lifetime is from create_watch() to the receipt of an
- * IN_IGNORED event from inotify.
+ * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
+ * first event, or to inotify_destroy().
*/
/*
@@ -160,11 +162,15 @@ static inline void put_inotify_dev(struc
}
/*
- * free_inotify_user_watch - * cleans up the watch and its references
+ * free_inotify_user_watch - cleans up the watch and its references
*/
-static inline void free_inotify_user_watch(struct inotify_user_watch *watch)
+static void free_inotify_user_watch(struct inotify_watch *w)
{
- struct inotify_device *dev = watch->dev;
+ struct inotify_user_watch *watch;
+ struct inotify_device *dev;
+
+ watch = container_of(w, struct inotify_user_watch, wdata);
+ dev = watch->dev;
atomic_dec(&dev->user->inotify_watches);
put_inotify_dev(dev);
@@ -241,7 +247,7 @@ inotify_dev_get_event(struct inotify_dev
}
/*
- * inotify_dev_queue_event - event callback registered with core inotify, adds
+ * inotify_dev_queue_event - event handler registered with core inotify, adds
* a new event to the given device
*
* Can sleep (calls kernel_event()).
@@ -262,8 +268,8 @@ static void inotify_dev_queue_event(stru
/* we can safely put the watch as we don't reference it while
* generating the event
*/
- if (mask & IN_IGNORED)
- free_inotify_user_watch(watch);
+	if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
+ put_inotify_watch(w); /* final put */
/* coalescing: drop this event if it is a dupe of the previous */
last = inotify_dev_get_event(dev);
@@ -374,15 +380,14 @@ static int create_watch(struct inotify_d
atomic_inc(&dev->user->inotify_watches);
+ inotify_init_watch(&watch->wdata);
ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
if (ret < 0)
- free_inotify_user_watch(watch);
+ free_inotify_user_watch(&watch->wdata);
return ret;
}
-/* Kernel API */
-
/* Device Interface */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
@@ -521,6 +526,11 @@ static const struct file_operations inot
.compat_ioctl = inotify_ioctl,
};
+static const struct inotify_operations inotify_user_ops = {
+ .handle_event = inotify_dev_queue_event,
+ .destroy_watch = free_inotify_user_watch,
+};
+
asmlinkage long sys_inotify_init(void)
{
struct inotify_device *dev;
@@ -552,7 +562,7 @@ asmlinkage long sys_inotify_init(void)
goto out_free_uid;
}
- ih = inotify_init(inotify_dev_queue_event);
+ ih = inotify_init(&inotify_user_ops);
if (unlikely(IS_ERR(ih))) {
ret = PTR_ERR(ih);
goto out_free_dev;
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 56de697..ec063b9 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -75,6 +75,10 @@ #include <linux/config.h>
* h_list is protected by ih->mutex of the associated inotify_handle.
* i_list, mask are protected by inode->inotify_mutex of the associated inode.
* ih, inode, and wd are never written to once the watch is created.
+ *
+ * Callers must use the established inotify interfaces to access inotify_watch
+ * contents. The content of this structure is private to the inotify
+ * implementation.
*/
struct inotify_watch {
struct list_head h_list; /* entry in inotify_handle's list */
@@ -86,6 +90,12 @@ struct inotify_watch {
__u32 mask; /* event mask for this watch */
};
+struct inotify_operations {
+ void (*handle_event)(struct inotify_watch *, u32, u32, u32,
+ const char *, struct inode *);
+ void (*destroy_watch)(struct inotify_watch *);
+};
+
#ifdef CONFIG_INOTIFY
/* Kernel API for producing events */
@@ -102,10 +112,8 @@ extern u32 inotify_get_cookie(void);
/* Kernel Consumer API */
-extern struct inotify_handle *inotify_init(void (*)(struct inotify_watch *,
- __u32, __u32, __u32,
- const char *,
- struct inode *));
+extern struct inotify_handle *inotify_init(const struct inotify_operations *);
+extern void inotify_init_watch(struct inotify_watch *);
extern void inotify_destroy(struct inotify_handle *);
extern __s32 inotify_find_watch(struct inotify_handle *, struct inode *,
struct inotify_watch **);
@@ -115,6 +123,10 @@ extern __s32 inotify_add_watch(struct in
struct inode *, __u32);
extern int inotify_rm_watch(struct inotify_handle *, struct inotify_watch *);
extern int inotify_rm_wd(struct inotify_handle *, __u32);
+extern void inotify_remove_watch_locked(struct inotify_handle *,
+ struct inotify_watch *);
+extern void get_inotify_watch(struct inotify_watch *);
+extern void put_inotify_watch(struct inotify_watch *);
#else
@@ -153,14 +165,15 @@ static inline u32 inotify_get_cookie(voi
return 0;
}
-static inline struct inotify_handle *inotify_init(void (*cb)(
- struct inotify_watch *,
- __u32, __u32, __u32,
- const char *, struct inode *))
+static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
return ERR_PTR(-EOPNOTSUPP);
}
+static inline void inotify_init_watch(struct inotify_watch *watch)
+{
+}
+
static inline void inotify_destroy(struct inotify_handle *ih)
{
}
@@ -196,6 +209,19 @@ static inline int inotify_rm_wd(struct i
return -EOPNOTSUPP;
}
+static inline void inotify_remove_watch_locked(struct inotify_handle *ih,
+ struct inotify_watch *watch)
+{
+}
+
+static inline void get_inotify_watch(struct inotify_watch *watch)
+{
+}
+
+static inline void put_inotify_watch(struct inotify_watch *watch)
+{
+}
+
#endif /* CONFIG_INOTIFY */
#endif /* __KERNEL __ */