We've asked Al to take another look over the patch and provide a
second round of feedback...
On Wed, 2005-04-27 at 18:17 -0500, Timothy R. Chavez wrote:
+int audit_list_watches(int pid, int seq)
+{
+ int ret = 0;
+ struct audit_wentry *wentry;
+ struct hlist_node *pos;
+
+ spin_lock(&audit_master_watchlist_lock);
+ hlist_for_each_entry(wentry, pos, &audit_master_watchlist, w_master) {
+ ret = audit_send_watch(pid, seq, wentry->w_watch);
OK, so audit_send_watch() is called with a spinlock held, and hence may
not sleep...
+int audit_send_watch(int pid, int seq, struct audit_watch *watch)
+{
+ int ret = 0;
+ void *memblk;
+ unsigned int offset;
+ unsigned int total;
+ struct audit_data *data;
+ struct audit_wentry *wentry;
+ struct audit_transport req;
+ struct nameidata nd;
+
+ req.valid = 0;
+ req.dev_major = MAJOR(watch->dev);
+ req.dev_minor = MINOR(watch->dev);
+ req.perms = watch->perms;
+ req.pathlen = strlen(watch->path) + 1;
+ if (watch->filterkey)
+ req.fklen = strlen(watch->filterkey) + 1;
+ else
+ req.fklen = 0;
+
+ ret = -ENOMEM;
+ total = sizeof(req) + req.pathlen + req.fklen;
+ memblk = kmalloc(total, GFP_KERNEL);
BANG.
+ if (!memblk)
+ goto audit_send_watch_exit;
+
+ /* See if path to watch is valid */
+ ret = path_lookup(watch->path, LOOKUP_PARENT, &nd);
BANG.
+ if (!ret) {
+ data = nd.dentry->d_inode->i_audit;
+ wentry = audit_wentry_fetch_lock(watch->name, data);
+ if (wentry && nd.dentry->d_inode->i_sb->s_dev == watch->dev)
+ req.valid = 1;
+ audit_wentry_put(wentry);
+ path_release(&nd);
+ }
+
+ /* Payload */
+ memcpy(memblk, &req, sizeof(req));
+ offset = total - req.fklen;
+ memcpy(memblk + offset, watch->filterkey, req.fklen);
+ offset = offset - req.pathlen;
+ memcpy(memblk + offset, watch->path, req.pathlen);
+
+ audit_send_reply(pid, seq, AUDIT_WATCH_LIST, 0, 1,
+ memblk, total);
BANG.
+ kfree(memblk);
+
+ ret = 0;
+
+audit_send_watch_exit:
+ return ret;
+}
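The three BANGs above mark calls that can sleep - kmalloc(GFP_KERNEL),
path_lookup() and the netlink send inside audit_send_reply() - all made
while audit_master_watchlist_lock is held. A minimal sketch of one way
out, assuming the watchlist can be guarded by a sleeping lock instead of
a spinlock (the semaphore name is illustrative, not from the patch, and
the sketch reuses the patch's own list and watch structures):

/* Hypothetical rework: guard the master watchlist with a semaphore so
 * that everything blocking in the per-watch reply path (allocation,
 * path lookup, netlink send) runs under a sleeping lock rather than
 * under a held spinlock. */
static DECLARE_MUTEX(audit_master_watchlist_sem);

int audit_list_watches(int pid, int seq)
{
	int ret = 0;
	struct audit_wentry *wentry;
	struct hlist_node *pos;

	down(&audit_master_watchlist_sem);
	hlist_for_each_entry(wentry, pos, &audit_master_watchlist, w_master) {
		ret = audit_send_watch(pid, seq, wentry->w_watch);
		if (ret)
			break;
	}
	up(&audit_master_watchlist_sem);

	return ret;
}

Building the replies under the spinlock with GFP_ATOMIC would not help
on its own, since path_lookup() and the netlink send can still sleep;
either the whole walk moves under a sleeping lock as above, or the
sleeping work has to be deferred until after the lock is dropped.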
Other feedback includes...
The example above is by far the most ridiculous, but the rest [of the
locking] is also very bad. Stuff like
write_lock(&data->lock);
...
write_unlock(&data->lock);
kfree(data);
is not a good sign, and while in this case the lock is pure obfuscation
(fortunately - if it could be contended here, we would be screwed), the
entire thing is on that level. And they are going for "fine-grained"
locking - i.e. dropping and reacquiring locks at a rate that would create
[lots] of bus traffic on an SMP box, with nothing approaching a good
reason.
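To spell out why that pattern is a red flag (a sketch, not code from the
patch; the struct is a simplified stand-in for the patch's audit_data):
if any other CPU can still reach the object and try to take its lock, it
may be spinning on it in the window between the unlock and the kfree(),
and will then acquire and release a lock inside freed memory.

/* Hypothetical illustration of the quoted anti-pattern. */
struct audit_data_stub {
	rwlock_t lock;
	/* ... */
};

static void broken_teardown(struct audit_data_stub *data)
{
	write_lock(&data->lock);
	/* ... final bookkeeping on *data ... */
	write_unlock(&data->lock);
	kfree(data);	/* anyone still waiting on data->lock now
			 * operates on freed memory */
}

The safe ordering is to unlink the object from every shared structure
first, so no new references can be taken, and only then free it - at
which point the final lock round-trip buys nothing, which is exactly the
point that the lock here is obfuscation.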
Note that the only comment they did not ignore was about LOOKUP_NOFOLLOW
having no effect when combined with LOOKUP_PARENT - everything else is
left as-is.
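For the record, the reason that comment mattered (a sketch, not from the
patch; the helper name is made up and it leans on the 2.6-era nameidata
fields the patch already uses): LOOKUP_PARENT makes path_lookup() stop
at the parent directory and leave the final component unresolved in
nd.last, so a don't-follow-symlinks flag on that same call has nothing
left to act on - following is only decided when the last component is
looked up afterwards.

/* Illustrative only: resolve the watch target's final component after
 * a LOOKUP_PARENT walk.  Symlink-following policy applies here, not to
 * the path_lookup() call itself. */
static struct dentry *lookup_watch_target(const char *path,
					  struct nameidata *nd)
{
	struct dentry *d;
	int err;

	err = path_lookup(path, LOOKUP_PARENT, nd);
	if (err)
		return ERR_PTR(err);

	down(&nd->dentry->d_inode->i_sem);
	d = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
	up(&nd->dentry->d_inode->i_sem);

	return d;	/* caller dput()s d and does path_release(nd) */
}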
--
dwmw2