Commit 3bb66d7f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify

* 'for-linus' of git://git.infradead.org/users/eparis/notify:
  fsnotify: allow groups to set freeing_mark to null
  inotify/dnotify: should_send_event shouldn't match on FS_EVENT_ON_CHILD
  dnotify: do not bother to lock entry->lock when reading mask
  dnotify: do not use ?true:false when assigning to a bool
  fsnotify: move events should indicate the event was on a child
  inotify: reimplement inotify using fsnotify
  fsnotify: handle filesystem unmounts with fsnotify marks
  fsnotify: fsnotify marks on inodes pin them in core
  fsnotify: allow groups to add private data to events
  fsnotify: add correlations between events
  fsnotify: include pathnames with entries when possible
  fsnotify: generic notification queue and waitq
  dnotify: reimplement dnotify using fsnotify
  fsnotify: parent event notification
  fsnotify: add marks to inodes so groups can interpret how to handle those inodes
  fsnotify: unified filesystem notification backend
parents 512626a0 a092ee20
@@ -1802,10 +1802,10 @@ F: drivers/char/epca*
F:	drivers/char/digi*

DIRECTORY NOTIFICATION (DNOTIFY)
-P:	Stephen Rothwell
-M:	sfr@canb.auug.org.au
+P:	Eric Paris
+M:	eparis@parisplace.org
L:	linux-kernel@vger.kernel.org
-S:	Supported
+S:	Maintained
F:	Documentation/filesystems/dnotify.txt
F:	fs/notify/dnotify/
F:	include/linux/dnotify.h
@@ -2858,6 +2858,8 @@ P: John McCutchan
M:	john@johnmccutchan.com
P:	Robert Love
M:	rlove@rlove.org
+P:	Eric Paris
+M:	eparis@parisplace.org
L:	linux-kernel@vger.kernel.org
S:	Maintained
F:	Documentation/filesystems/inotify.txt
......
@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
+#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
@@ -189,6 +190,10 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
	inode->i_private = NULL;
	inode->i_mapping = mapping;

+#ifdef CONFIG_FSNOTIFY
+	inode->i_fsnotify_mask = 0;
+#endif
+
	return inode;

out_free_security:
@@ -221,6 +226,7 @@ void destroy_inode(struct inode *inode)
	BUG_ON(inode_has_buffers(inode));
	ima_inode_free(inode);
	security_inode_free(inode);
+	fsnotify_inode_delete(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
@@ -252,6 +258,9 @@ void inode_init_once(struct inode *inode)
	INIT_LIST_HEAD(&inode->inotify_watches);
	mutex_init(&inode->inotify_mutex);
#endif
+#ifdef CONFIG_FSNOTIFY
+	INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
+#endif
}
EXPORT_SYMBOL(inode_init_once);
@@ -398,6 +407,7 @@ int invalidate_inodes(struct super_block *sb)
	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	inotify_unmount_inodes(&sb->s_inodes);
+	fsnotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);
......
+config FSNOTIFY
+	bool "Filesystem notification backend"
+	default y
+	---help---
+	   fsnotify is a backend for filesystem notification. fsnotify does
+	   not provide any userspace interface but does provide the basis
+	   needed for other notification schemes such as dnotify, inotify,
+	   and fanotify.
+
+	   Say Y here to enable fsnotify support.
+
+	   If unsure, say Y.
+
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
......
+obj-$(CONFIG_FSNOTIFY)	+= fsnotify.o notification.o group.o inode_mark.o
+
obj-y			+= dnotify/
obj-y			+= inotify/
......
config DNOTIFY
	bool "Dnotify support"
+	depends on FSNOTIFY
	default y
	help
	  Dnotify is a directory-based per-fd file change notification system
......
@@ -3,6 +3,9 @@
 *
 * Copyright (C) 2000,2001,2002 Stephen Rothwell
 *
+ * Copyright (C) 2009 Eric Paris <Red Hat Inc>
+ * dnotify was largely rewritten to use the new fsnotify infrastructure
+ *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
@@ -21,24 +24,173 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/fdtable.h>
+#include <linux/fsnotify_backend.h>

int dir_notify_enable __read_mostly = 1;

-static struct kmem_cache *dn_cache __read_mostly;
+static struct kmem_cache *dnotify_struct_cache __read_mostly;
+static struct kmem_cache *dnotify_mark_entry_cache __read_mostly;
+static struct fsnotify_group *dnotify_group __read_mostly;
+static DEFINE_MUTEX(dnotify_mark_mutex);
+
+/*
+ * dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which
+ * is being watched by dnotify. If multiple userspace applications are watching
+ * the same directory with dnotify their information is chained in dn.
+ */
+struct dnotify_mark_entry {
+	struct fsnotify_mark_entry fsn_entry;
+	struct dnotify_struct *dn;
+};

-static void redo_inode_mask(struct inode *inode)
+/*
+ * When a process starts or stops watching an inode the set of events which
+ * dnotify cares about for that inode may change. This function runs the
+ * list of everything receiving dnotify events about this directory and
+ * calculates the set of all those events. After it updates what dnotify is
+ * interested in it calls the fsnotify function so it can update the set of
+ * all events relevant to this inode.
+ */
+static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
{
-	unsigned long new_mask;
+	__u32 new_mask, old_mask;
	struct dnotify_struct *dn;
+	struct dnotify_mark_entry *dnentry = container_of(entry,
+						struct dnotify_mark_entry,
+						fsn_entry);
+
+	assert_spin_locked(&entry->lock);

+	old_mask = entry->mask;
	new_mask = 0;
-	for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next)
-		new_mask |= dn->dn_mask & ~DN_MULTISHOT;
-	inode->i_dnotify_mask = new_mask;
+	for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next)
+		new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
+	entry->mask = new_mask;
+
+	if (old_mask == new_mask)
+		return;
+
+	if (entry->inode)
+		fsnotify_recalc_inode_mask(entry->inode);
}
+/*
+ * Main fsnotify call where events are delivered to dnotify.
+ * Find the dnotify mark on the relevant inode, run the list of dnotify structs
+ * on that mark and determine which of them has expressed interest in receiving
+ * events of this type. When found, signal the correct process and destroy the
+ * dnotify struct if it was not registered to receive multiple events.
+ */
+static int dnotify_handle_event(struct fsnotify_group *group,
+				struct fsnotify_event *event)
+{
+	struct fsnotify_mark_entry *entry = NULL;
+	struct dnotify_mark_entry *dnentry;
+	struct inode *to_tell;
+	struct dnotify_struct *dn;
+	struct dnotify_struct **prev;
+	struct fown_struct *fown;
+
+	to_tell = event->to_tell;
+
+	spin_lock(&to_tell->i_lock);
+	entry = fsnotify_find_mark_entry(group, to_tell);
+	spin_unlock(&to_tell->i_lock);
+
+	/* unlikely since we already passed dnotify_should_send_event() */
+	if (unlikely(!entry))
+		return 0;
+	dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+
+	spin_lock(&entry->lock);
+	prev = &dnentry->dn;
+	while ((dn = *prev) != NULL) {
+		if ((dn->dn_mask & event->mask) == 0) {
+			prev = &dn->dn_next;
+			continue;
+		}
+		fown = &dn->dn_filp->f_owner;
+		send_sigio(fown, dn->dn_fd, POLL_MSG);
+		if (dn->dn_mask & FS_DN_MULTISHOT)
+			prev = &dn->dn_next;
+		else {
+			*prev = dn->dn_next;
+			kmem_cache_free(dnotify_struct_cache, dn);
+			dnotify_recalc_inode_mask(entry);
+		}
+	}
+	spin_unlock(&entry->lock);
+	fsnotify_put_mark(entry);
+
+	return 0;
+}
+
+/*
+ * Given an inode and mask determine if dnotify would be interested in sending
+ * userspace notification for that pair.
+ */
+static bool dnotify_should_send_event(struct fsnotify_group *group,
+				      struct inode *inode, __u32 mask)
+{
+	struct fsnotify_mark_entry *entry;
+	bool send;
+
+	/* !dir_notify_enable should never get here, don't waste time checking
+	if (!dir_notify_enable)
+		return 0; */
+
+	/* not a dir, dnotify doesn't care */
+	if (!S_ISDIR(inode->i_mode))
+		return false;
+
+	spin_lock(&inode->i_lock);
+	entry = fsnotify_find_mark_entry(group, inode);
+	spin_unlock(&inode->i_lock);
+
+	/* no mark means no dnotify watch */
+	if (!entry)
+		return false;
+
+	mask = (mask & ~FS_EVENT_ON_CHILD);
+	send = (mask & entry->mask);
+
+	fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry() */
+
+	return send;
+}
+
+static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
+{
+	struct dnotify_mark_entry *dnentry = container_of(entry,
+							  struct dnotify_mark_entry,
+							  fsn_entry);
+
+	BUG_ON(dnentry->dn);
+
+	kmem_cache_free(dnotify_mark_entry_cache, dnentry);
+}
+
+static struct fsnotify_ops dnotify_fsnotify_ops = {
+	.handle_event = dnotify_handle_event,
+	.should_send_event = dnotify_should_send_event,
+	.free_group_priv = NULL,
+	.freeing_mark = NULL,
+	.free_event_priv = NULL,
+};
+/*
+ * Called every time a file is closed. Looks first for a dnotify mark on the
+ * inode. If one is found run all of the ->dn entries attached to that
+ * mark for one relevant to this process closing the file and remove that
+ * dnotify_struct. If that was the last dnotify_struct also remove the
+ * fsnotify_mark_entry.
+ */
void dnotify_flush(struct file *filp, fl_owner_t id)
{
+	struct fsnotify_mark_entry *entry;
+	struct dnotify_mark_entry *dnentry;
	struct dnotify_struct *dn;
	struct dnotify_struct **prev;
	struct inode *inode;
@@ -46,145 +198,243 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
	inode = filp->f_path.dentry->d_inode;
	if (!S_ISDIR(inode->i_mode))
		return;

	spin_lock(&inode->i_lock);
-	prev = &inode->i_dnotify;
+	entry = fsnotify_find_mark_entry(dnotify_group, inode);
+	spin_unlock(&inode->i_lock);
+	if (!entry)
+		return;
+	dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+
+	mutex_lock(&dnotify_mark_mutex);
+
+	spin_lock(&entry->lock);
+	prev = &dnentry->dn;
	while ((dn = *prev) != NULL) {
		if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
			*prev = dn->dn_next;
-			redo_inode_mask(inode);
-			kmem_cache_free(dn_cache, dn);
+			kmem_cache_free(dnotify_struct_cache, dn);
+			dnotify_recalc_inode_mask(entry);
			break;
		}
		prev = &dn->dn_next;
	}
-	spin_unlock(&inode->i_lock);
+
+	spin_unlock(&entry->lock);
+
+	/* nothing else could have found us thanks to the dnotify_mark_mutex */
+	if (dnentry->dn == NULL)
+		fsnotify_destroy_mark_by_entry(entry);
+
+	fsnotify_recalc_group_mask(dnotify_group);
+
+	mutex_unlock(&dnotify_mark_mutex);
+
+	fsnotify_put_mark(entry);
+}
+
+/* this conversion is done only at watch creation */
+static __u32 convert_arg(unsigned long arg)
+{
+	__u32 new_mask = FS_EVENT_ON_CHILD;
+
+	if (arg & DN_MULTISHOT)
+		new_mask |= FS_DN_MULTISHOT;
+	if (arg & DN_DELETE)
+		new_mask |= (FS_DELETE | FS_MOVED_FROM);
+	if (arg & DN_MODIFY)
+		new_mask |= FS_MODIFY;
+	if (arg & DN_ACCESS)
+		new_mask |= FS_ACCESS;
+	if (arg & DN_ATTRIB)
+		new_mask |= FS_ATTRIB;
+	if (arg & DN_RENAME)
+		new_mask |= FS_DN_RENAME;
+	if (arg & DN_CREATE)
+		new_mask |= (FS_CREATE | FS_MOVED_TO);
+
+	return new_mask;
}
+
+/*
+ * If multiple processes watch the same inode with dnotify there is only one
+ * dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct
+ * onto that mark. This function either attaches the new dnotify_struct onto
+ * that list, or it |= the mask onto an existing dnotify_struct.
+ */
+static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry,
+		     fl_owner_t id, int fd, struct file *filp, __u32 mask)
+{
+	struct dnotify_struct *odn;
+
+	odn = dnentry->dn;
+	while (odn != NULL) {
+		/* adding more events to existing dnotify_struct? */
+		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
+			odn->dn_fd = fd;
+			odn->dn_mask |= mask;
+			return -EEXIST;
+		}
+		odn = odn->dn_next;
+	}
+
+	dn->dn_mask = mask;
+	dn->dn_fd = fd;
+	dn->dn_filp = filp;
+	dn->dn_owner = id;
+	dn->dn_next = dnentry->dn;
+	dnentry->dn = dn;
+
+	return 0;
+}
+
+/*
+ * When a process calls fcntl to attach a dnotify watch to a directory it ends
+ * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be
+ * attached to the fsnotify_mark.
+ */
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
+	struct dnotify_mark_entry *new_dnentry, *dnentry;
+	struct fsnotify_mark_entry *new_entry, *entry;
	struct dnotify_struct *dn;
-	struct dnotify_struct *odn;
-	struct dnotify_struct **prev;
	struct inode *inode;
	fl_owner_t id = current->files;
	struct file *f;
-	int error = 0;
+	int destroy = 0, error = 0;
+	__u32 mask;
+
+	/* we use these to tell if we need to kfree */
+	new_entry = NULL;
+	dn = NULL;
+
+	if (!dir_notify_enable) {
+		error = -EINVAL;
+		goto out_err;
+	}

+	/* a 0 mask means we are explicitly removing the watch */
	if ((arg & ~DN_MULTISHOT) == 0) {
		dnotify_flush(filp, id);
-		return 0;
+		error = 0;
+		goto out_err;
	}
-	if (!dir_notify_enable)
-		return -EINVAL;

+	/* dnotify only works on directories */
	inode = filp->f_path.dentry->d_inode;
-	if (!S_ISDIR(inode->i_mode))
-		return -ENOTDIR;
+	if (!S_ISDIR(inode->i_mode)) {
+		error = -ENOTDIR;
+		goto out_err;
+	}

-	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
-	if (dn == NULL)
-		return -ENOMEM;
-	spin_lock(&inode->i_lock);
-	prev = &inode->i_dnotify;
-	while ((odn = *prev) != NULL) {
-		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
-			odn->dn_fd = fd;
-			odn->dn_mask |= arg;
-			inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
-			goto out_free;
-		}
-		prev = &odn->dn_next;
-	}
-	rcu_read_lock();
-	f = fcheck(fd);
-	rcu_read_unlock();
-	/* we'd lost the race with close(), sod off silently */
-	/* note that inode->i_lock prevents reordering problems
-	 * between accesses to descriptor table and ->i_dnotify */
-	if (f != filp)
-		goto out_free;
-	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
-	if (error)
-		goto out_free;
-	dn->dn_mask = arg;
-	dn->dn_fd = fd;
-	dn->dn_filp = filp;
-	dn->dn_owner = id;
-	inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
-	dn->dn_next = inode->i_dnotify;
-	inode->i_dnotify = dn;
-	spin_unlock(&inode->i_lock);
-	return 0;
-
-out_free:
-	spin_unlock(&inode->i_lock);
-	kmem_cache_free(dn_cache, dn);
-	return error;
-}
+	/* expect most fcntl to add new rather than augment old */
+	dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
+	if (!dn) {
+		error = -ENOMEM;
+		goto out_err;
+	}

+	/* new fsnotify mark, we expect most fcntl calls to add a new mark */
+	new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL);
+	if (!new_dnentry) {
+		error = -ENOMEM;
+		goto out_err;
+	}

+	/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
+	mask = convert_arg(arg);

+	/* set up the new_entry and new_dnentry */
+	new_entry = &new_dnentry->fsn_entry;
+	fsnotify_init_mark(new_entry, dnotify_free_mark);
+	new_entry->mask = mask;
+	new_dnentry->dn = NULL;
-void __inode_dir_notify(struct inode *inode, unsigned long event)
-{
-	struct dnotify_struct *dn;
-	struct dnotify_struct **prev;
-	struct fown_struct *fown;
-	int changed = 0;
+	/* this is needed to prevent the fcntl/close race described below */
+	mutex_lock(&dnotify_mark_mutex);

+	/* add the new_entry or find an old one. */
	spin_lock(&inode->i_lock);
-	prev = &inode->i_dnotify;
-	while ((dn = *prev) != NULL) {
-		if ((dn->dn_mask & event) == 0) {
-			prev = &dn->dn_next;
-			continue;
-		}
-		fown = &dn->dn_filp->f_owner;
-		send_sigio(fown, dn->dn_fd, POLL_MSG);
-		if (dn->dn_mask & DN_MULTISHOT)
-			prev = &dn->dn_next;
-		else {
-			*prev = dn->dn_next;
-			changed = 1;
-			kmem_cache_free(dn_cache, dn);
-		}
-	}
-	if (changed)
-		redo_inode_mask(inode);
+	entry = fsnotify_find_mark_entry(dnotify_group, inode);
	spin_unlock(&inode->i_lock);
-}
-
-EXPORT_SYMBOL(__inode_dir_notify);
+	if (entry) {
+		dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+		spin_lock(&entry->lock);
+	} else {
+		fsnotify_add_mark(new_entry, dnotify_group, inode);
+		spin_lock(&new_entry->lock);
+		entry = new_entry;
+		dnentry = new_dnentry;
+		/* we used new_entry, so don't free it */
+		new_entry = NULL;
+	}

-/*
- * This is hopelessly wrong, but unfixable without API changes. At
- * least it doesn't oops the kernel...
- *
- * To safely access ->d_parent we need to keep d_move away from it. Use the
- * dentry's d_lock for this.
- */
-void dnotify_parent(struct dentry *dentry, unsigned long event)
-{
-	struct dentry *parent;
-
-	if (!dir_notify_enable)
-		return;
+	rcu_read_lock();
+	f = fcheck(fd);
+	rcu_read_unlock();

-	spin_lock(&dentry->d_lock);
-	parent = dentry->d_parent;
-	if (parent->d_inode->i_dnotify_mask & event) {
-		dget(parent);
-		spin_unlock(&dentry->d_lock);
-		__inode_dir_notify(parent->d_inode, event);
-		dput(parent);
-	} else {
-		spin_unlock(&dentry->d_lock);
-	}
-}
-
-EXPORT_SYMBOL_GPL(dnotify_parent);
+	/* if (f != filp) means that we lost a race and another task/thread
+	 * actually closed the fd we are still playing with before we grabbed
+	 * the dnotify_mark_mutex and entry->lock. Since closing the fd is the
+	 * only time we clean up the mark entries we need to get our mark off
+	 * the list. */
+	if (f != filp) {
+		/* if we added ourselves, shoot ourselves, it's possible that
+		 * the flush actually did shoot this entry. That's fine too
+		 * since multiple calls to destroy_mark is perfectly safe, if
+		 * we found a dnentry already attached to the inode, just sod
+		 * off silently as the flush at close time dealt with it.
+		 */
+		if (dnentry == new_dnentry)
+			destroy = 1;
+		goto out;
+	}
+
+	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+	if (error) {
+		/* if we added, we must shoot */
+		if (dnentry == new_dnentry)
+			destroy = 1;
+		goto out;
+	}
+
+	error = attach_dn(dn, dnentry, id, fd, filp, mask);
+	/* !error means that we attached the dn to the dnentry, so don't free it */
+	if (!error)
+		dn = NULL;
+	/* -EEXIST means that we didn't add this new dn and used an old one.
+	 * that isn't an error (and the unused dn should be freed) */
+	else if (error == -EEXIST)
+		error = 0;
+
+	dnotify_recalc_inode_mask(entry);
+out:
+	spin_unlock(&entry->lock);
+
+	if (destroy)
+		fsnotify_destroy_mark_by_entry(entry);
+
+	fsnotify_recalc_group_mask(dnotify_group);
+
+	mutex_unlock(&dnotify_mark_mutex);
+	fsnotify_put_mark(entry);
+out_err:
+	if (new_entry)
+		fsnotify_put_mark(new_entry);
+	if (dn)
+		kmem_cache_free(dnotify_struct_cache, dn);
+	return error;
+}

static int __init dnotify_init(void)
{
-	dn_cache = kmem_cache_create("dnotify_cache",
-		sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
+	dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
+	dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
+
+	dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM,
+					      0, &dnotify_fsnotify_ops);
+	if (IS_ERR(dnotify_group))
+		panic("unable to allocate fsnotify group for dnotify\n");
	return 0;
}
......
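For context, the userspace contract that fcntl_dirnotify() implements is unchanged by this rewrite: a process opens a directory, arms a watch with fcntl(fd, F_NOTIFY, mask), and receives a signal (SIGIO by default) when the directory changes. A rough userspace sketch under those documented semantics, illustrative only and not part of the patch:

/* Minimal dnotify consumer sketch: watch "." for changes. Uses the
 * documented fcntl(2) F_NOTIFY interface; error handling elided. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t hit;

static void on_sigio(int sig)
{
	hit = 1;
}

int main(void)
{
	int fd = open(".", O_RDONLY | O_DIRECTORY);

	signal(SIGIO, on_sigio);
	/* DN_MULTISHOT keeps the watch armed after the first event; without
	 * it the kernel frees the dnotify_struct after one delivery, which
	 * is the single-shot branch in dnotify_handle_event() above. */
	fcntl(fd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_MODIFY | DN_MULTISHOT);

	while (!hit)
		pause();
	printf("directory changed\n");
	close(fd);
	return 0;
}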
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/srcu.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
/*
* Clear all of the marks on an inode when it is being evicted from core
*/
void __fsnotify_inode_delete(struct inode *inode)
{
fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
/*
* Given an inode, first check if we care what happens to our children. Inotify
* and dnotify both tell their parents about events. If we care about any event
* on a child we run all of our children and set a dentry flag saying that the
 * parent cares. Thus when an event happens on a child it can quickly tell
 * if there is a need to find a parent and send the event to the parent.
*/
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
struct dentry *alias;
int watched;
if (!S_ISDIR(inode->i_mode))
return;
/* determine if the children should tell inode about their events */
watched = fsnotify_inode_watches_children(inode);
spin_lock(&dcache_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
list_for_each_entry(alias, &inode->i_dentry, d_alias) {
struct dentry *child;
/* run all of the children of the original inode and fix their
* d_flags to indicate parental interest (their parent is the
* original inode) */
list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
if (!child->d_inode)
continue;
spin_lock(&child->d_lock);
if (watched)
child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
spin_unlock(&child->d_lock);
}
}
spin_unlock(&dcache_lock);
}
/* Notify this dentry's parent about a child's events. */
void __fsnotify_parent(struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
bool send = false;
bool should_update_children = false;
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
spin_lock(&dentry->d_lock);
parent = dentry->d_parent;
p_inode = parent->d_inode;
if (fsnotify_inode_watches_children(p_inode)) {
if (p_inode->i_fsnotify_mask & mask) {
dget(parent);
send = true;
}
} else {
/*
		 * The parent doesn't care about events on its children but
* at least one child thought it did. We need to run all the
* children and update their d_flags to let them know p_inode
* doesn't care about them any more.
*/
dget(parent);
should_update_children = true;
}
spin_unlock(&dentry->d_lock);
if (send) {
/* we are notifying a parent so come up with the new mask which
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
dentry->d_name.name, 0);
dput(parent);
}
if (unlikely(should_update_children)) {
__fsnotify_update_child_dentry_flags(p_inode);
dput(parent);
}
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);
/*
* This is the main call to fsnotify. The VFS calls into hook specific functions
 * in linux/fsnotify.h. Those functions then in turn call here. From here we
 * call out to all of the registered fsnotify_groups. Those groups can then
 * use the notification event in whatever way they feel necessary.
*/
void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const char *file_name, u32 cookie)
{
struct fsnotify_group *group;
struct fsnotify_event *event = NULL;
int idx;
	/* global tests shouldn't care about events on a child, only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
if (list_empty(&fsnotify_groups))
return;
if (!(test_mask & fsnotify_mask))
return;
if (!(test_mask & to_tell->i_fsnotify_mask))
return;
/*
* SRCU!! the groups list is very very much read only and the path is
* very hot. The VAST majority of events are not going to need to do
* anything other than walk the list so it's crazy to pre-allocate.
*/
idx = srcu_read_lock(&fsnotify_grp_srcu);
list_for_each_entry_rcu(group, &fsnotify_groups, group_list) {
if (test_mask & group->mask) {
if (!group->ops->should_send_event(group, to_tell, mask))
continue;
if (!event) {
event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie);
/* shit, we OOM'd and now we can't tell, maybe
* someday someone else will want to do something
* here */
if (!event)
break;
}
group->ops->handle_event(group, event);
}
}
srcu_read_unlock(&fsnotify_grp_srcu, idx);
/*
* fsnotify_create_event() took a reference so the event can't be cleaned
* up while we are still trying to add it to lists, drop that one.
*/
if (event)
fsnotify_put_event(event);
}
EXPORT_SYMBOL_GPL(fsnotify);
static __init int fsnotify_init(void)
{
return init_srcu_struct(&fsnotify_grp_srcu);
}
subsys_initcall(fsnotify_init);
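The dispatch loop above only ever calls into groups found on the fsnotify_groups list, so a backend's whole registration burden is an fsnotify_ops table plus one fsnotify_obtain_group() call. Below is a hedged sketch of that minimal pattern, modeled on dnotify_init() earlier in this series; EXAMPLE_GROUP_NUM and both callbacks are illustrative placeholders, not symbols from this patch:

/* Sketch of a minimal fsnotify backend; names prefixed "example" and the
 * group number are invented for illustration. */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fsnotify_backend.h>

#define EXAMPLE_GROUP_NUM 42	/* placeholder group number */

static bool example_should_send_event(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask)
{
	/* a real backend would look up its mark on this inode here */
	return true;
}

static int example_handle_event(struct fsnotify_group *group,
				struct fsnotify_event *event)
{
	printk(KERN_INFO "example: event mask 0x%x\n", event->mask);
	return 0;
}

static const struct fsnotify_ops example_ops = {
	.should_send_event	= example_should_send_event,
	.handle_event		= example_handle_event,
};

static struct fsnotify_group *example_group;

static int __init example_init(void)
{
	/* the second argument is the initial set of FS_* events we want */
	example_group = fsnotify_obtain_group(EXAMPLE_GROUP_NUM,
					      FS_CREATE | FS_DELETE,
					      &example_ops);
	if (IS_ERR(example_group))
		return PTR_ERR(example_group);
	return 0;
}
subsys_initcall(example_init);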
#ifndef __FS_NOTIFY_FSNOTIFY_H_
#define __FS_NOTIFY_FSNOTIFY_H_
#include <linux/list.h>
#include <linux/fsnotify.h>
#include <linux/srcu.h>
#include <linux/types.h>
/* protects reads of fsnotify_groups */
extern struct srcu_struct fsnotify_grp_srcu;
/* all groups which receive fsnotify events */
extern struct list_head fsnotify_groups;
/* bitwise OR of all event types (FS_*) across all fsnotify_groups */
extern __u32 fsnotify_mask;
/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);
/* final kfree of a group */
extern void fsnotify_final_destroy_group(struct fsnotify_group *group);
/* run the list of all marks associated with inode and flag them to be freed */
extern void fsnotify_clear_marks_by_inode(struct inode *inode);
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
*/
extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
/* allocate and destroy an event holder to attach events to notification/access queues */
extern struct fsnotify_event_holder *fsnotify_alloc_event_holder(void);
extern void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder);
#endif /* __FS_NOTIFY_FSNOTIFY_H_ */
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include <asm/atomic.h>
/* protects writes to fsnotify_groups and fsnotify_mask */
static DEFINE_MUTEX(fsnotify_grp_mutex);
/* protects reads while running the fsnotify_groups list */
struct srcu_struct fsnotify_grp_srcu;
/* all groups registered to receive filesystem notifications */
LIST_HEAD(fsnotify_groups);
/* bitwise OR of all events (FS_*) interesting to some group on this system */
__u32 fsnotify_mask;
/*
 * When a new group registers or changes its set of interesting events
* this function updates the fsnotify_mask to contain all interesting events
*/
void fsnotify_recalc_global_mask(void)
{
struct fsnotify_group *group;
__u32 mask = 0;
int idx;
idx = srcu_read_lock(&fsnotify_grp_srcu);
list_for_each_entry_rcu(group, &fsnotify_groups, group_list)
mask |= group->mask;
srcu_read_unlock(&fsnotify_grp_srcu, idx);
fsnotify_mask = mask;
}
/*
* Update the group->mask by running all of the marks associated with this
* group and finding the bitwise | of all of the mark->mask. If we change
* the group->mask we need to update the global mask of events interesting
* to the system.
*/
void fsnotify_recalc_group_mask(struct fsnotify_group *group)
{
__u32 mask = 0;
__u32 old_mask = group->mask;
struct fsnotify_mark_entry *entry;
spin_lock(&group->mark_lock);
list_for_each_entry(entry, &group->mark_entries, g_list)
mask |= entry->mask;
spin_unlock(&group->mark_lock);
group->mask = mask;
if (old_mask != mask)
fsnotify_recalc_global_mask();
}
/*
* Take a reference to a group so things found under the fsnotify_grp_mutex
* can't get freed under us
*/
static void fsnotify_get_group(struct fsnotify_group *group)
{
atomic_inc(&group->refcnt);
}
/*
* Final freeing of a group
*/
void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
/* clear the notification queue of all events */
fsnotify_flush_notify(group);
if (group->ops->free_group_priv)
group->ops->free_group_priv(group);
kfree(group);
}
/*
* Trying to get rid of a group. We need to first get rid of any outstanding
* allocations and then free the group. Remember that fsnotify_clear_marks_by_group
* could miss marks that are being freed by inode and those marks could still
 * hold a reference to this group (via group->num_marks). If we get into that
 * situation, fsnotify_final_destroy_group will get called when that final
* mark is freed.
*/
static void fsnotify_destroy_group(struct fsnotify_group *group)
{
/* clear all inode mark entries for this group */
fsnotify_clear_marks_by_group(group);
/* past the point of no return, matches the initial value of 1 */
if (atomic_dec_and_test(&group->num_marks))
fsnotify_final_destroy_group(group);
}
/*
* Remove this group from the global list of groups that will get events
* this can be done even if there are still references and things still using
* this group. This just stops the group from getting new events.
*/
static void __fsnotify_evict_group(struct fsnotify_group *group)
{
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
if (group->on_group_list)
list_del_rcu(&group->group_list);
group->on_group_list = 0;
}
/*
* Called when a group is no longer interested in getting events. This can be
* used if a group is misbehaving or if for some reason a group should no longer
* get any filesystem events.
*/
void fsnotify_evict_group(struct fsnotify_group *group)
{
mutex_lock(&fsnotify_grp_mutex);
__fsnotify_evict_group(group);
mutex_unlock(&fsnotify_grp_mutex);
}
/*
 * Drop a reference to a group. Free it if that was the last reference.
*/
void fsnotify_put_group(struct fsnotify_group *group)
{
if (!atomic_dec_and_mutex_lock(&group->refcnt, &fsnotify_grp_mutex))
return;
/*
* OK, now we know that there's no other users *and* we hold mutex,
* so no new references will appear
*/
__fsnotify_evict_group(group);
/*
* now it's off the list, so the only thing we might care about is
* srcu access....
*/
mutex_unlock(&fsnotify_grp_mutex);
synchronize_srcu(&fsnotify_grp_srcu);
/* and now it is really dead. _Nothing_ could be seeing it */
fsnotify_recalc_global_mask();
fsnotify_destroy_group(group);
}
/*
* Simply run the fsnotify_groups list and find a group which matches
* the given parameters. If a group is found we take a reference to that
* group.
*/
static struct fsnotify_group *fsnotify_find_group(unsigned int group_num, __u32 mask,
const struct fsnotify_ops *ops)
{
struct fsnotify_group *group_iter;
struct fsnotify_group *group = NULL;
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
list_for_each_entry_rcu(group_iter, &fsnotify_groups, group_list) {
if (group_iter->group_num == group_num) {
if ((group_iter->mask == mask) &&
(group_iter->ops == ops)) {
fsnotify_get_group(group_iter);
group = group_iter;
} else
group = ERR_PTR(-EEXIST);
}
}
return group;
}
/*
* Either finds an existing group which matches the group_num, mask, and ops or
* creates a new group and adds it to the global group list. In either case we
* take a reference for the group returned.
*/
struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
const struct fsnotify_ops *ops)
{
struct fsnotify_group *group, *tgroup;
/* very low use, simpler locking if we just always alloc */
group = kmalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
atomic_set(&group->refcnt, 1);
group->on_group_list = 0;
group->group_num = group_num;
group->mask = mask;
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
group->q_len = 0;
group->max_events = UINT_MAX;
spin_lock_init(&group->mark_lock);
atomic_set(&group->num_marks, 0);
INIT_LIST_HEAD(&group->mark_entries);
group->ops = ops;
mutex_lock(&fsnotify_grp_mutex);
tgroup = fsnotify_find_group(group_num, mask, ops);
if (tgroup) {
/* group already exists */
mutex_unlock(&fsnotify_grp_mutex);
/* destroy the new one we made */
fsnotify_put_group(group);
return tgroup;
}
/* group not found, add a new one */
list_add_rcu(&group->group_list, &fsnotify_groups);
group->on_group_list = 1;
/* being on the fsnotify_groups list holds one num_marks */
atomic_inc(&group->num_marks);
mutex_unlock(&fsnotify_grp_mutex);
if (mask)
fsnotify_recalc_global_mask();
return group;
}
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* fsnotify inode mark locking/lifetime/and refcnting
*
* REFCNT:
* The mark->refcnt tells how many "things" in the kernel currently are
* referencing this object. The object typically will live inside the kernel
* with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
 * which can find this object holding the appropriate locks can take a reference
 * and the object itself is guaranteed to survive until the reference is dropped.
*
* LOCKING:
* There are 3 spinlocks involved with fsnotify inode marks and they MUST
* be taken in order as follows:
*
* entry->lock
* group->mark_lock
* inode->i_lock
*
* entry->lock protects 2 things, entry->group and entry->inode. You must hold
* that lock to dereference either of these things (they could be NULL even with
* the lock)
*
* group->mark_lock protects the mark_entries list anchored inside a given group
* and each entry is hooked via the g_list. It also sorta protects the
* free_g_list, which when used is anchored by a private list on the stack of the
* task which held the group->mark_lock.
*
* inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
* given inode and each entry is hooked via the i_list. (and sorta the
* free_i_list)
*
*
* LIFETIME:
* Inode marks survive between when they are added to an inode and when their
* refcnt==0.
*
* The inode mark can be cleared for a number of different reasons including:
* - The inode is unlinked for the last time. (fsnotify_inode_remove)
* - The inode is being evicted from cache. (fsnotify_inode_delete)
* - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
* - Something explicitly requests that it be removed. (fsnotify_destroy_mark_by_entry)
* - The fsnotify_group associated with the mark is going away and all such marks
* need to be cleaned up. (fsnotify_clear_marks_by_group)
*
* Worst case we are given an inode and need to clean up all the marks on that
* inode. We take i_lock and walk the i_fsnotify_mark_entries safely. For each
* mark on the list we take a reference (so the mark can't disappear under us).
 * We remove that mark from the inode's list of marks and we add this mark to a
 * private list anchored on the stack using free_i_list. At this point we no
* longer fear anything finding the mark using the inode's list of marks.
*
* We can safely and locklessly run the private list on the stack of everything
* we just unattached from the original inode. For each mark on the private list
 * we grab the mark->lock and can thus dereference mark->group and mark->inode. If
* we see the group and inode are not NULL we take those locks. Now holding all
* 3 locks we can completely remove the mark from other tasks finding it in the
* future. Remember, 10 things might already be referencing this mark, but they
* better be holding a ref. We drop our reference we took before we unhooked it
* from the inode. When the ref hits 0 we can free the mark.
*
* Very similarly for freeing by group, except we use free_g_list.
*
* This has the very interesting property of being able to run concurrently with
* any (or all) other directions.
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
{
atomic_inc(&entry->refcnt);
}
void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
{
if (atomic_dec_and_test(&entry->refcnt))
entry->free_mark(entry);
}
/*
 * Recalculate the mask of events relevant to a given inode (caller must hold
 * inode->i_lock).
*/
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
struct fsnotify_mark_entry *entry;
struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&inode->i_lock);
hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
new_mask |= entry->mask;
inode->i_fsnotify_mask = new_mask;
}
/*
* Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types
* any notifier is interested in hearing for this inode.
*/
void fsnotify_recalc_inode_mask(struct inode *inode)
{
spin_lock(&inode->i_lock);
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
__fsnotify_update_child_dentry_flags(inode);
}
/*
* Any time a mark is getting freed we end up here.
* The caller had better be holding a reference to this mark so we don't actually
* do the final put under the entry->lock
*/
void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
{
struct fsnotify_group *group;
struct inode *inode;
spin_lock(&entry->lock);
group = entry->group;
inode = entry->inode;
BUG_ON(group && !inode);
BUG_ON(!group && inode);
/* if !group something else already marked this to die */
if (!group) {
spin_unlock(&entry->lock);
return;
}
/* 1 from caller and 1 for being on i_list/g_list */
BUG_ON(atomic_read(&entry->refcnt) < 2);
spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
hlist_del_init(&entry->i_list);
entry->inode = NULL;
list_del_init(&entry->g_list);
entry->group = NULL;
fsnotify_put_mark(entry); /* for i_list and g_list */
/*
* this mark is now off the inode->i_fsnotify_mark_entries list and we
* hold the inode->i_lock, so this is the perfect time to update the
* inode->i_fsnotify_mask
*/
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&group->mark_lock);
spin_unlock(&entry->lock);
/*
* Some groups like to know that marks are being freed. This is a
* callback to the group function to let it know that this entry
* is being freed.
*/
if (group->ops->freeing_mark)
group->ops->freeing_mark(entry, group);
/*
* __fsnotify_update_child_dentry_flags(inode);
*
* I really want to call that, but we can't, we have no idea if the inode
* still exists the second we drop the entry->lock.
*
	 * The next time an event arrives for this inode from one of its children
	 * __fsnotify_parent will see that the inode doesn't care about its
* children and will update all of these flags then. So really this
* is just a lazy update (and could be a perf win...)
*/
iput(inode);
/*
	 * it's possible that this group tried to destroy itself, but this
	 * mark was simultaneously being freed by the inode. If that's the
* case, we finish freeing the group here.
*/
if (unlikely(atomic_dec_and_test(&group->num_marks)))
fsnotify_final_destroy_group(group);
}
/*
* Given a group, destroy all of the marks associated with that group.
*/
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
struct fsnotify_mark_entry *lentry, *entry;
LIST_HEAD(free_list);
spin_lock(&group->mark_lock);
list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
list_add(&entry->free_g_list, &free_list);
list_del_init(&entry->g_list);
fsnotify_get_mark(entry);
}
spin_unlock(&group->mark_lock);
list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
}
}
/*
* Given an inode, destroy all of the marks associated with that inode.
*/
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
struct fsnotify_mark_entry *entry, *lentry;
struct hlist_node *pos, *n;
LIST_HEAD(free_list);
spin_lock(&inode->i_lock);
hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
list_add(&entry->free_i_list, &free_list);
hlist_del_init(&entry->i_list);
fsnotify_get_mark(entry);
}
spin_unlock(&inode->i_lock);
list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
}
}
/*
* given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
struct inode *inode)
{
struct fsnotify_mark_entry *entry;
struct hlist_node *pos;
assert_spin_locked(&inode->i_lock);
hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
if (entry->group == group) {
fsnotify_get_mark(entry);
return entry;
}
}
return NULL;
}
/*
* Nothing fancy, just initialize lists and locks and counters.
*/
void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
void (*free_mark)(struct fsnotify_mark_entry *entry))
{
spin_lock_init(&entry->lock);
atomic_set(&entry->refcnt, 1);
INIT_HLIST_NODE(&entry->i_list);
entry->group = NULL;
entry->mask = 0;
entry->inode = NULL;
entry->free_mark = free_mark;
}
/*
* Attach an initialized mark entry to a given group and inode.
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group and for which inodes.
*/
int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
struct fsnotify_group *group, struct inode *inode)
{
struct fsnotify_mark_entry *lentry;
int ret = 0;
inode = igrab(inode);
if (unlikely(!inode))
return -EINVAL;
/*
* LOCKING ORDER!!!!
* entry->lock
* group->mark_lock
* inode->i_lock
*/
spin_lock(&entry->lock);
spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
entry->group = group;
entry->inode = inode;
lentry = fsnotify_find_mark_entry(group, inode);
if (!lentry) {
hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
list_add(&entry->g_list, &group->mark_entries);
fsnotify_get_mark(entry); /* for i_list and g_list */
atomic_inc(&group->num_marks);
fsnotify_recalc_inode_mask_locked(inode);
}
spin_unlock(&inode->i_lock);
spin_unlock(&group->mark_lock);
spin_unlock(&entry->lock);
if (lentry) {
ret = -EEXIST;
iput(inode);
fsnotify_put_mark(lentry);
} else {
__fsnotify_update_child_dentry_flags(inode);
}
return ret;
}
/**
* fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @list: list of inodes being unmounted (sb->s_inodes)
*
* Called with inode_lock held, protecting the unmounting super block's list
* of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
* We temporarily drop inode_lock, however, and CAN block.
*/
void fsnotify_unmount_inodes(struct list_head *list)
{
struct inode *inode, *next_i, *need_iput = NULL;
list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
struct inode *need_iput_tmp;
/*
* We cannot __iget() an inode in state I_CLEAR, I_FREEING,
* I_WILL_FREE, or I_NEW which is fine because by that point
* the inode cannot have any associated watches.
*/
if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
continue;
/*
* If i_count is zero, the inode cannot have any watches and
* doing an __iget/iput with MS_ACTIVE clear would actually
* evict all inodes with zero i_count from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
if (!atomic_read(&inode->i_count))
continue;
need_iput_tmp = need_iput;
need_iput = NULL;
/* In case fsnotify_inode_delete() drops a reference. */
if (inode != need_iput_tmp)
__iget(inode);
else
need_iput_tmp = NULL;
/* In case the dropping of a reference would nuke next_i. */
if ((&next_i->i_sb_list != list) &&
atomic_read(&next_i->i_count) &&
!(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
__iget(next_i);
need_iput = next_i;
}
/*
* We can safely drop inode_lock here because we hold
* references on both inode and next_i. Also no new inodes
* will be added since the umount has begun. Finally,
* iprune_mutex keeps shrink_icache_memory() away.
*/
spin_unlock(&inode_lock);
if (need_iput_tmp)
iput(need_iput_tmp);
/* for each watch, send FS_UNMOUNT and then remove it */
fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
fsnotify_inode_delete(inode);
iput(inode);
spin_lock(&inode_lock);
}
}
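Read together with fcntl_dirnotify() earlier in this series, the mark API above reduces to three calls for a backend: fsnotify_init_mark() with a destructor, fsnotify_add_mark() to attach the mark to a group/inode pair, and fsnotify_destroy_mark_by_entry() plus a final fsnotify_put_mark() to tear it down. A rough, illustrative sketch of that lifecycle follows; my_free/my_watch_inode/my_unwatch_inode are invented names, not part of this patch:

/* Sketch of the mark lifecycle; "my_" names are placeholders. */
static void my_free(struct fsnotify_mark_entry *entry)
{
	kfree(entry);			/* entry was kmalloc'd below */
}

static int my_watch_inode(struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark_entry *entry;
	int ret;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	fsnotify_init_mark(entry, my_free);	/* refcnt starts at 1 */
	entry->mask = FS_MODIFY;

	ret = fsnotify_add_mark(entry, group, inode); /* takes list references */
	/* drop our setup reference either way: on success the i_list/g_list
	 * references keep the mark alive; on -EEXIST my_free() runs now */
	fsnotify_put_mark(entry);
	return ret;
}

static void my_unwatch_inode(struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark_entry *entry;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);	/* takes a reference */
	spin_unlock(&inode->i_lock);
	if (!entry)
		return;

	fsnotify_destroy_mark_by_entry(entry);	/* unhook from inode and group */
	fsnotify_put_mark(entry);		/* drop the find reference */
}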
config INOTIFY
	bool "Inotify file change notification support"
-	default y
+	default n
	---help---
-	  Say Y here to enable inotify support. Inotify is a file change
-	  notification system and a replacement for dnotify. Inotify fixes
-	  numerous shortcomings in dnotify and introduces several new features
-	  including multiple file events, one-shot support, and unmount
-	  notification.
+	  Say Y here to enable legacy in kernel inotify support. Inotify is a
+	  file change notification system. It is a replacement for dnotify.
+	  This option only provides the legacy inotify in kernel API. There
+	  are no in tree kernel users of this interface since it is deprecated.
+	  You only need this if you are loading an out of tree kernel module
+	  that uses inotify.

	  For more information, see <file:Documentation/filesystems/inotify.txt>

-	  If unsure, say Y.
+	  If unsure, say N.

config INOTIFY_USER
	bool "Inotify support for userspace"
-	depends on INOTIFY
+	depends on FSNOTIFY
	default y
	---help---
	  Say Y here to enable inotify support for userspace, including the
	  associated system calls. Inotify allows monitoring of both files and
	  directories via a single open fd. Events are read from the file
	  descriptor, which is also select()- and poll()-able.

+	  Inotify fixes numerous shortcomings in dnotify and introduces several
+	  new features including multiple file events, one-shot support, and
+	  unmount notification.
+
	  For more information, see <file:Documentation/filesystems/inotify.txt>
......
obj-$(CONFIG_INOTIFY)		+= inotify.o
-obj-$(CONFIG_INOTIFY_USER)	+= inotify_user.o
+obj-$(CONFIG_INOTIFY_USER)	+= inotify_fsnotify.o inotify_user.o
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>

static atomic_t inotify_cookie;
@@ -905,6 +906,25 @@ EXPORT_SYMBOL_GPL(inotify_rm_watch);
 */
static int __init inotify_setup(void)
{
+	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
+	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
+	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
+	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
+	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
+	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
+	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
+	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
+	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
+	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
+	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
+	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
+	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
+	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
+	BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
+	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
+	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
+
	atomic_set(&inotify_cookie, 0);

	return 0;
......
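The BUILD_BUG_ON() block above is what lets the new inotify backend pass IN_* bits straight through as FS_* bits with no translation table: each check compiles to nothing when the two constants match and breaks the build when they don't. A reduced, standalone illustration of the pattern (not kernel code; MY_BUILD_BUG_ON and the two constants are invented for the example):

/* Standalone sketch of the compile-time-assertion idiom: the array gets a
 * negative size, and thus fails to compile, whenever cond is true. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define IN_EXAMPLE 0x00000002u	/* stand-ins for an IN_*/FS_* pair */
#define FS_EXAMPLE 0x00000002u

int main(void)
{
	/* compiles only because the two constants are identical, so code
	 * elsewhere may use them interchangeably without conversion */
	MY_BUILD_BUG_ON(IN_EXAMPLE != FS_EXAMPLE);
	return 0;
}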
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/slab.h> /* struct kmem_cache */
extern struct kmem_cache *event_priv_cachep;
struct inotify_event_private_data {
struct fsnotify_event_private_data fsnotify_event_priv_data;
int wd;
};
struct inotify_inode_mark_entry {
/* fsnotify_mark_entry MUST be the first thing */
struct fsnotify_mark_entry fsn_entry;
int wd;
};
extern void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
extern const struct fsnotify_ops inotify_fsnotify_ops;
/*
 * fs/notify/inotify/inotify_fsnotify.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include "inotify.h"
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *ientry;
struct inode *to_tell;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int wd, ret;
to_tell = event->to_tell;
spin_lock(&to_tell->i_lock);
entry = fsnotify_find_mark_entry(group, to_tell);
spin_unlock(&to_tell->i_lock);
	/* race with watch removal? We already passed should_send */
if (unlikely(!entry))
return 0;
ientry = container_of(entry, struct inotify_inode_mark_entry,
fsn_entry);
wd = ientry->wd;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
if (unlikely(!event_priv))
return -ENOMEM;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = wd;
ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
/* EEXIST is not an error */
if (ret == -EEXIST)
ret = 0;
/* did event_priv get attached? */
if (list_empty(&fsn_event_priv->event_list))
inotify_free_event_priv(fsn_event_priv);
/*
* If we hold the entry until after the event is on the queue
* IN_IGNORED won't be able to pass this event in the queue
*/
fsnotify_put_mark(entry);
return ret;
}
static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
inotify_destroy_mark_entry(entry, group);
}
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
{
struct fsnotify_mark_entry *entry;
bool send;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (!entry)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD);
send = (entry->mask & mask);
/* find took a reference */
fsnotify_put_mark(entry);
return send;
}
static int idr_callback(int id, void *p, void *data)
{
BUG();
return 0;
}
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
}
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
{
struct inotify_event_private_data *event_priv;
event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
kmem_cache_free(event_priv_cachep, event_priv);
}
const struct fsnotify_ops inotify_fsnotify_ops = {
.handle_event = inotify_handle_event,
.should_send_event = inotify_should_send_event,
.free_group_priv = inotify_free_group_priv,
.free_event_priv = inotify_free_event_priv,
.freeing_mark = inotify_freeing_mark,
};
@@ -8,6 +8,9 @@
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
+ * Copyright (C) 2009 Eric Paris <Red Hat Inc>
+ * inotify was largely rewritten to make use of the fsnotify infrastructure
+ *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
@@ -19,94 +22,48 @@
 * General Public License for more details.
 */

-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/list.h>
+#include <linux/fs.h> /* struct inode */
+#include <linux/fsnotify_backend.h>
+#include <linux/idr.h>
+#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
+#include <linux/kernel.h> /* roundup() */
+#include <linux/magic.h> /* superblock magic number */
+#include <linux/mount.h> /* mntget */
+#include <linux/namei.h> /* LOOKUP_FOLLOW */
+#include <linux/path.h> /* struct path */
+#include <linux/sched.h> /* struct user */
+#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
-#include <linux/magic.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/wait.h>

-#include <asm/ioctls.h>
+#include "inotify.h"

-static struct kmem_cache *watch_cachep __read_mostly;
-static struct kmem_cache *event_cachep __read_mostly;
+#include <asm/ioctls.h>

static struct vfsmount *inotify_mnt __read_mostly;

+/* this just sits here and wastes global memory. used to just pad userspace messages with zeros */
+static struct inotify_event nul_inotify_event;
+
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
-static int inotify_max_user_watches __read_mostly;
static int inotify_max_queued_events __read_mostly;
+int inotify_max_user_watches __read_mostly;

-/*
- * Lock ordering:
- *
- * inotify_dev->up_mutex (ensures we don't re-add the same watch)
- * inode->inotify_mutex (protects inode's watch list)
- * inotify_handle->mutex (protects inotify_handle's watch list)
- * inotify_dev->ev_mutex (protects device's event queue)
- */
+static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
+struct kmem_cache *event_priv_cachep __read_mostly;
+static struct fsnotify_event *inotify_ignored_event;

/*
- * Lifetimes of the main data structures:
- *
- * inotify_device: Lifetime is managed by reference count, from
- * sys_inotify_init() until release. Additional references can bump the count
- * via get_inotify_dev() and drop the count via put_inotify_dev().
- *
- * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
- * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
- * first event, or to inotify_destroy().
+ * When inotify registers a new group it increments this and uses that
+ * value as an offset to set the fsnotify group "name" and priority.
 */
+static atomic_t inotify_grp_num;

-/*
- * struct inotify_device - represents an inotify instance
- *
- * This structure is protected by the mutex 'mutex'.
- */
-struct inotify_device {
-	wait_queue_head_t wq;		/* wait queue for i/o */
-	struct mutex ev_mutex;		/* protects event queue */
-	struct mutex up_mutex;		/* synchronizes watch updates */
-	struct list_head events;	/* list of queued events */
-	struct user_struct *user;	/* user who opened this dev */
-	struct inotify_handle *ih;	/* inotify handle */
-	struct fasync_struct *fa;	/* async notification */
-	atomic_t count;			/* reference count */
-	unsigned int queue_size;	/* size of the queue (bytes) */
-	unsigned int event_count;	/* number of pending events */
-	unsigned int max_events;	/* maximum number of events */
-};
-
-/*
- * struct inotify_kernel_event - An inotify event, originating from a watch and
- * queued for user-space. A list of these is attached to each instance of the
- * device. In read(), this list is walked and all events that can fit in the
- * buffer are returned.
- *
- * Protected by dev->ev_mutex of the device in which we are queued.
- */
-struct inotify_kernel_event {
-	struct inotify_event	event;	/* the user-space event */
-	struct list_head	list;	/* entry in inotify_device's list */
-	char			*name;	/* filename, if any */
-};
-
-/*
- * struct inotify_user_watch - our version of an inotify_watch, we add
- * a reference to the associated inotify_device.
- */
-struct inotify_user_watch {
-	struct inotify_device	*dev;	/* associated device */
-	struct inotify_watch	wdata;	/* inotify watch data */
-};

#ifdef CONFIG_SYSCTL
@@ -149,280 +106,36 @@ ctl_table inotify_table[] = {
}; };
#endif /* CONFIG_SYSCTL */ #endif /* CONFIG_SYSCTL */
static inline void get_inotify_dev(struct inotify_device *dev) static inline __u32 inotify_arg_to_mask(u32 arg)
{
atomic_inc(&dev->count);
}
static inline void put_inotify_dev(struct inotify_device *dev)
{
if (atomic_dec_and_test(&dev->count)) {
atomic_dec(&dev->user->inotify_devs);
free_uid(dev->user);
kfree(dev);
}
}
/*
* free_inotify_user_watch - cleans up the watch and its references
*/
static void free_inotify_user_watch(struct inotify_watch *w)
{
struct inotify_user_watch *watch;
struct inotify_device *dev;
watch = container_of(w, struct inotify_user_watch, wdata);
dev = watch->dev;
atomic_dec(&dev->user->inotify_watches);
put_inotify_dev(dev);
kmem_cache_free(watch_cachep, watch);
}
/*
* kernel_event - create a new kernel event with the given parameters
*
* This function can sleep.
*/
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
const char *name)
{
struct inotify_kernel_event *kevent;
kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
if (unlikely(!kevent))
return NULL;
/* we hand this out to user-space, so zero it just in case */
memset(&kevent->event, 0, sizeof(struct inotify_event));
kevent->event.wd = wd;
kevent->event.mask = mask;
kevent->event.cookie = cookie;
INIT_LIST_HEAD(&kevent->list);
if (name) {
size_t len, rem, event_size = sizeof(struct inotify_event);
/*
* We need to pad the filename so as to properly align an
* array of inotify_event structures. Because the structure is
* small and the common case is a small filename, we just round
* up to the next multiple of the structure's sizeof. This is
* simple and safe for all architectures.
*/
len = strlen(name) + 1;
rem = event_size - len;
if (len > event_size) {
rem = event_size - (len % event_size);
if (len % event_size == 0)
rem = 0;
}
kevent->name = kmalloc(len + rem, GFP_NOFS);
if (unlikely(!kevent->name)) {
kmem_cache_free(event_cachep, kevent);
return NULL;
}
memcpy(kevent->name, name, len);
if (rem)
memset(kevent->name + len, 0, rem);
kevent->event.len = len + rem;
} else {
kevent->event.len = 0;
kevent->name = NULL;
}
return kevent;
}
/*
* inotify_dev_get_event - return the next event in the given dev's queue
*
* Caller must hold dev->ev_mutex.
*/
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
return list_entry(dev->events.next, struct inotify_kernel_event, list);
}
/*
* inotify_dev_get_last_event - return the last event in the given dev's queue
*
* Caller must hold dev->ev_mutex.
*/
static inline struct inotify_kernel_event *
inotify_dev_get_last_event(struct inotify_device *dev)
{ {
if (list_empty(&dev->events)) __u32 mask;
return NULL;
return list_entry(dev->events.prev, struct inotify_kernel_event, list);
}
/* /* everything should accept their own ignored and cares about children */
* inotify_dev_queue_event - event handler registered with core inotify, adds mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
* a new event to the given device
*
* Can sleep (calls kernel_event()).
*/
static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
u32 cookie, const char *name,
struct inode *ignored)
{
struct inotify_user_watch *watch;
struct inotify_device *dev;
struct inotify_kernel_event *kevent, *last;
watch = container_of(w, struct inotify_user_watch, wdata); /* mask off the flags used to open the fd */
dev = watch->dev; mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
mutex_lock(&dev->ev_mutex); return mask;
/* we can safely put the watch as we don't reference it while
* generating the event
*/
if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
put_inotify_watch(w); /* final put */
/* coalescing: drop this event if it is a dupe of the previous */
last = inotify_dev_get_last_event(dev);
if (last && last->event.mask == mask && last->event.wd == wd &&
last->event.cookie == cookie) {
const char *lastname = last->name;
if (!name && !lastname)
goto out;
if (name && lastname && !strcmp(lastname, name))
goto out;
}
/* the queue overflowed and we already sent the Q_OVERFLOW event */
if (unlikely(dev->event_count > dev->max_events))
goto out;
/* if the queue overflows, we need to notify user space */
if (unlikely(dev->event_count == dev->max_events))
kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
else
kevent = kernel_event(wd, mask, cookie, name);
if (unlikely(!kevent))
goto out;
/* queue the event and wake up anyone waiting */
dev->event_count++;
dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
list_add_tail(&kevent->list, &dev->events);
wake_up_interruptible(&dev->wq);
kill_fasync(&dev->fa, SIGIO, POLL_IN);
out:
mutex_unlock(&dev->ev_mutex);
}
/*
* remove_kevent - cleans up the given kevent
*
* Caller must hold dev->ev_mutex.
*/
static void remove_kevent(struct inotify_device *dev,
struct inotify_kernel_event *kevent)
{
list_del(&kevent->list);
dev->event_count--;
dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
}
/*
* free_kevent - frees the given kevent.
*/
static void free_kevent(struct inotify_kernel_event *kevent)
{
kfree(kevent->name);
kmem_cache_free(event_cachep, kevent);
}
/*
* inotify_dev_event_dequeue - destroy an event on the given device
*
* Caller must hold dev->ev_mutex.
*/
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
if (!list_empty(&dev->events)) {
struct inotify_kernel_event *kevent;
kevent = inotify_dev_get_event(dev);
remove_kevent(dev, kevent);
free_kevent(kevent);
}
}
/*
* find_inode - resolve a user-given path to a specific inode
*/
static int find_inode(const char __user *dirname, struct path *path,
unsigned flags)
{
int error;
error = user_path_at(AT_FDCWD, dirname, flags, path);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
error = inode_permission(path->dentry->d_inode, MAY_READ);
if (error)
path_put(path);
return error;
} }
/* static inline u32 inotify_mask_to_arg(__u32 mask)
* create_watch - creates a watch on the given device.
*
* Callers must hold dev->up_mutex.
*/
static int create_watch(struct inotify_device *dev, struct inode *inode,
u32 mask)
{ {
struct inotify_user_watch *watch; return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
int ret; IN_Q_OVERFLOW);
if (atomic_read(&dev->user->inotify_watches) >=
inotify_max_user_watches)
return -ENOSPC;
watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
if (unlikely(!watch))
return -ENOMEM;
/* save a reference to device and bump the count to make it official */
get_inotify_dev(dev);
watch->dev = dev;
atomic_inc(&dev->user->inotify_watches);
inotify_init_watch(&watch->wdata);
ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
if (ret < 0)
free_inotify_user_watch(&watch->wdata);
return ret;
} }
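For orientation, here is how these two helpers meet userspace: a minimal sketch (illustrative only, not part of this patch) of the inotify_add_watch(2) call whose mask argument inotify_arg_to_mask() consumes; the directory path comes from argv and is hypothetical.

/* Minimal userspace sketch exercising the mask conversion above;
 * illustrative only, not part of this patch. */
#include <stdio.h>
#include <sys/inotify.h>

int main(int argc, char *argv[])
{
	int fd, wd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <dir>\n", argv[0]);
		return 1;
	}

	fd = inotify_init1(0);
	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}

	/* inotify_arg_to_mask() keeps only IN_ALL_EVENTS | IN_ONESHOT from
	 * this argument and always adds IN_IGNORED and the on-child flag */
	wd = inotify_add_watch(fd, argv[1], IN_ALL_EVENTS);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}
	printf("watching %s as wd %d\n", argv[1], wd);
	return 0;
}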
/* Device Interface */ /* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait) static unsigned int inotify_poll(struct file *file, poll_table *wait)
{ {
struct inotify_device *dev = file->private_data; struct fsnotify_group *group = file->private_data;
int ret = 0; int ret = 0;
poll_wait(file, &dev->wq, wait); poll_wait(file, &group->notification_waitq, wait);
mutex_lock(&dev->ev_mutex); mutex_lock(&group->notification_mutex);
if (!list_empty(&dev->events)) if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM; ret = POLLIN | POLLRDNORM;
mutex_unlock(&dev->ev_mutex); mutex_unlock(&group->notification_mutex);
return ret; return ret;
} }
...@@ -432,26 +145,29 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) ...@@ -432,26 +145,29 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
* enough to fit in "count". Return an error pointer if * enough to fit in "count". Return an error pointer if
* not large enough. * not large enough.
* *
* Called with the device ev_mutex held. * Called with the group->notification_mutex held.
*/ */
static struct inotify_kernel_event *get_one_event(struct inotify_device *dev, static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count) size_t count)
{ {
size_t event_size = sizeof(struct inotify_event); size_t event_size = sizeof(struct inotify_event);
struct inotify_kernel_event *kevent; struct fsnotify_event *event;
if (list_empty(&dev->events)) if (fsnotify_notify_queue_is_empty(group))
return NULL; return NULL;
kevent = inotify_dev_get_event(dev); event = fsnotify_peek_notify_event(group);
if (kevent->name)
event_size += kevent->event.len; event_size += roundup(event->name_len, event_size);
if (event_size > count) if (event_size > count)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
remove_kevent(dev, kevent); /* held the notification_mutex the whole time, so this is the
return kevent; * same event we peeked above */
fsnotify_remove_notify_event(group);
return event;
} }
/* /*
...@@ -460,51 +176,90 @@ static struct inotify_kernel_event *get_one_event(struct inotify_device *dev, ...@@ -460,51 +176,90 @@ static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
* We already checked that the event size is smaller than the * We already checked that the event size is smaller than the
* buffer we had in "get_one_event()" above. * buffer we had in "get_one_event()" above.
*/ */
static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent, static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
char __user *buf) char __user *buf)
{ {
struct inotify_event inotify_event;
struct fsnotify_event_private_data *fsn_priv;
struct inotify_event_private_data *priv;
size_t event_size = sizeof(struct inotify_event); size_t event_size = sizeof(struct inotify_event);
size_t name_len;
/* we get the inotify watch descriptor from the event private data */
spin_lock(&event->lock);
fsn_priv = fsnotify_remove_priv_from_event(group, event);
spin_unlock(&event->lock);
if (!fsn_priv)
inotify_event.wd = -1;
else {
priv = container_of(fsn_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
inotify_event.wd = priv->wd;
inotify_free_event_priv(fsn_priv);
}
/* round up event->name_len so it is a multiple of event_size */
name_len = roundup(event->name_len, event_size);
inotify_event.len = name_len;
inotify_event.mask = inotify_mask_to_arg(event->mask);
inotify_event.cookie = event->sync_cookie;
if (copy_to_user(buf, &kevent->event, event_size)) /* send the main event */
if (copy_to_user(buf, &inotify_event, event_size))
return -EFAULT; return -EFAULT;
if (kevent->name) { buf += event_size;
buf += event_size;
if (copy_to_user(buf, kevent->name, kevent->event.len)) /*
* fsnotify only stores the pathname, so here we have to send the pathname
* and then pad that pathname out to a multiple of sizeof(inotify_event)
* with zeros. I get my zeros from the nul_inotify_event.
*/
if (name_len) {
unsigned int len_to_zero = name_len - event->name_len;
/* copy the path name */
if (copy_to_user(buf, event->file_name, event->name_len))
return -EFAULT; return -EFAULT;
buf += event->name_len;
event_size += kevent->event.len; /* fill userspace with 0's from nul_inotify_event */
if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
return -EFAULT;
buf += len_to_zero;
event_size += name_len;
} }
return event_size; return event_size;
} }
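Since copy_event_to_user() pads every name out to a multiple of sizeof(struct inotify_event), userspace can walk a read(2) buffer in strides of the header size plus ev->len. A hedged sketch of the consumer side (assumes fd is an inotify descriptor with at least one watch set up):

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

/* Walk one read(2) buffer of inotify events; each record is
 * sizeof(struct inotify_event) + ev->len bytes, name NUL-padded. */
static void drain_events(int fd)
{
	char buf[4096];
	ssize_t n = read(fd, buf, sizeof(buf));
	char *p = buf;

	while (n > 0 && p < buf + n) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("wd=%d mask=%#x cookie=%u name=%s\n",
		       ev->wd, ev->mask, ev->cookie,
		       ev->len ? ev->name : "");
		p += sizeof(struct inotify_event) + ev->len;
	}
}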
static ssize_t inotify_read(struct file *file, char __user *buf, static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos) size_t count, loff_t *pos)
{ {
struct inotify_device *dev; struct fsnotify_group *group;
struct fsnotify_event *kevent;
char __user *start; char __user *start;
int ret; int ret;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
start = buf; start = buf;
dev = file->private_data; group = file->private_data;
while (1) { while (1) {
struct inotify_kernel_event *kevent; prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_lock(&dev->ev_mutex); mutex_unlock(&group->notification_mutex);
kevent = get_one_event(dev, count);
mutex_unlock(&dev->ev_mutex);
if (kevent) { if (kevent) {
ret = PTR_ERR(kevent); ret = PTR_ERR(kevent);
if (IS_ERR(kevent)) if (IS_ERR(kevent))
break; break;
ret = copy_event_to_user(kevent, buf); ret = copy_event_to_user(group, kevent, buf);
free_kevent(kevent); fsnotify_put_event(kevent);
if (ret < 0) if (ret < 0)
break; break;
buf += ret; buf += ret;
...@@ -525,7 +280,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, ...@@ -525,7 +280,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
schedule(); schedule();
} }
finish_wait(&dev->wq, &wait); finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT) if (start != buf && ret != -EFAULT)
ret = buf - start; ret = buf - start;
return ret; return ret;
...@@ -533,25 +288,19 @@ static ssize_t inotify_read(struct file *file, char __user *buf, ...@@ -533,25 +288,19 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
static int inotify_fasync(int fd, struct file *file, int on) static int inotify_fasync(int fd, struct file *file, int on)
{ {
struct inotify_device *dev = file->private_data; struct fsnotify_group *group = file->private_data;
return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO; return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
} }
static int inotify_release(struct inode *ignored, struct file *file) static int inotify_release(struct inode *ignored, struct file *file)
{ {
struct inotify_device *dev = file->private_data; struct fsnotify_group *group = file->private_data;
inotify_destroy(dev->ih);
/* destroy all of the events on this device */ fsnotify_clear_marks_by_group(group);
mutex_lock(&dev->ev_mutex);
while (!list_empty(&dev->events))
inotify_dev_event_dequeue(dev);
mutex_unlock(&dev->ev_mutex);
/* free this device: the put matching the get in inotify_init() */ /* free this group, matching get was inotify_init->fsnotify_obtain_group */
put_inotify_dev(dev); fsnotify_put_group(group);
return 0; return 0;
} }
...@@ -559,16 +308,27 @@ static int inotify_release(struct inode *ignored, struct file *file) ...@@ -559,16 +308,27 @@ static int inotify_release(struct inode *ignored, struct file *file)
static long inotify_ioctl(struct file *file, unsigned int cmd, static long inotify_ioctl(struct file *file, unsigned int cmd,
unsigned long arg) unsigned long arg)
{ {
struct inotify_device *dev; struct fsnotify_group *group;
struct fsnotify_event_holder *holder;
struct fsnotify_event *event;
void __user *p; void __user *p;
int ret = -ENOTTY; int ret = -ENOTTY;
size_t send_len = 0;
dev = file->private_data; group = file->private_data;
p = (void __user *) arg; p = (void __user *) arg;
switch (cmd) { switch (cmd) {
case FIONREAD: case FIONREAD:
ret = put_user(dev->queue_size, (int __user *) p); mutex_lock(&group->notification_mutex);
list_for_each_entry(holder, &group->notification_list, event_list) {
event = holder->event;
send_len += sizeof(struct inotify_event);
send_len += roundup(event->name_len,
sizeof(struct inotify_event));
}
mutex_unlock(&group->notification_mutex);
ret = put_user(send_len, (int __user *) p);
break; break;
} }
...@@ -576,23 +336,233 @@ static long inotify_ioctl(struct file *file, unsigned int cmd, ...@@ -576,23 +336,233 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
} }
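The FIONREAD path now has to total the queue by walking it, but the userspace contract is unchanged; a sketch (assumes fd is an inotify descriptor):

#include <stdio.h>
#include <sys/ioctl.h>

/* Number of bytes a read(2) on the inotify fd would currently need;
 * mirrors the send_len computation in inotify_ioctl() above. */
static int pending_bytes(int fd)
{
	int n = 0;

	if (ioctl(fd, FIONREAD, &n) < 0) {
		perror("ioctl(FIONREAD)");
		return -1;
	}
	return n;
}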
static const struct file_operations inotify_fops = { static const struct file_operations inotify_fops = {
.poll = inotify_poll, .poll = inotify_poll,
.read = inotify_read, .read = inotify_read,
.fasync = inotify_fasync, .fasync = inotify_fasync,
.release = inotify_release, .release = inotify_release,
.unlocked_ioctl = inotify_ioctl, .unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl, .compat_ioctl = inotify_ioctl,
}; };
static const struct inotify_operations inotify_user_ops = {
.handle_event = inotify_dev_queue_event,
.destroy_watch = free_inotify_user_watch,
};
/*
* find_inode - resolve a user-given path to a specific inode
*/
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
int error;
error = user_path_at(AT_FDCWD, dirname, flags, path);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
error = inode_permission(path->dentry->d_inode, MAY_READ);
if (error)
path_put(path);
return error;
}
/*
* When, for whatever reason, inotify is done with a mark (or what used to be a
* watch) we need to remove that watch from the idr and we need to send IN_IGNORED
* for the given wd.
*
* There is a bit of recursion here. The loop looks like:
* inotify_destroy_mark_entry -> fsnotify_destroy_mark_by_entry ->
* inotify_freeing_mark -> inotify_destroy_mark_entry -> restart
* But the loop is broken in 2 places. fsnotify_destroy_mark_by_entry sets
* entry->group = NULL before the call to inotify_freeing_mark, so the if (egroup)
* test below will not call back to fsnotify again. But even if that test wasn't
* there this would still be safe since fsnotify_destroy_mark_by_entry() is
* safe from recursion.
*/
void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
struct inotify_inode_mark_entry *ientry;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
struct fsnotify_group *egroup;
struct idr *idr;
spin_lock(&entry->lock);
egroup = entry->group;
/* if egroup is set we aren't really done and something might still send
 * events for this inode; on the callback we'll send the IN_IGNORED */
if (egroup) {
spin_unlock(&entry->lock);
fsnotify_destroy_mark_by_entry(entry);
return;
}
spin_unlock(&entry->lock);
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
if (unlikely(!event_priv))
goto skip_send_ignore;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = ientry->wd;
fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
/* did the private data get added? */
if (list_empty(&fsn_event_priv->event_list))
inotify_free_event_priv(fsn_event_priv);
skip_send_ignore:
/* remove this entry from the idr */
spin_lock(&group->inotify_data.idr_lock);
idr = &group->inotify_data.idr;
idr_remove(idr, ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
/* removed from idr, drop that reference */
fsnotify_put_mark(entry);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;
kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
struct fsnotify_mark_entry *entry = NULL;
struct inotify_inode_mark_entry *ientry;
int ret = 0;
int add = (arg & IN_MASK_ADD);
__u32 mask;
__u32 old_mask, new_mask;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!mask))
return -EINVAL;
ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
if (unlikely(!ientry))
return -ENOMEM;
/* we set the mask at the end after attaching it */
fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
ientry->wd = 0;
find_entry:
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (entry) {
kmem_cache_free(inotify_inode_mark_cachep, ientry);
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
} else {
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
ret = -ENOSPC;
goto out_err;
}
ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
if (ret == -EEXIST)
goto find_entry;
else if (ret)
goto out_err;
entry = &ientry->fsn_entry;
retry:
ret = -ENOMEM;
if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
goto out_err;
spin_lock(&group->inotify_data.idr_lock);
/* if entry is added to the idr we keep the reference obtained
 * through fsnotify_add_mark. remember to drop this reference
 * when entry is removed from idr */
ret = idr_get_new_above(&group->inotify_data.idr, entry,
++group->inotify_data.last_wd,
&ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
if (ret == -EAGAIN)
goto retry;
goto out_err;
}
atomic_inc(&group->inotify_data.user->inotify_watches);
}
spin_lock(&entry->lock);
old_mask = entry->mask;
if (add) {
entry->mask |= mask;
new_mask = entry->mask;
} else {
entry->mask = mask;
new_mask = entry->mask;
}
spin_unlock(&entry->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this entry than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* more bits in this entry than the group? */
int do_group = (new_mask & ~group->mask);
/* update the inode with this new entry */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
/* update the group mask with the new mask */
if (dropped || do_group)
fsnotify_recalc_group_mask(group);
}
return ientry->wd;
out_err:
/* this isn't supposed to happen; just kill the watch */
if (entry) {
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
}
return ret;
}
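The add/replace branch above is what gives IN_MASK_ADD its semantics; seen from userspace (sketch with hypothetical arguments):

#include <sys/inotify.h>

/* A second inotify_add_watch() on the same inode returns the same wd;
 * with IN_MASK_ADD the new bits are ORed into the existing mask rather
 * than replacing it, per the branch in inotify_update_watch() above. */
static void widen_watch(int fd, const char *dir)
{
	int wd1 = inotify_add_watch(fd, dir, IN_CREATE);
	int wd2 = inotify_add_watch(fd, dir, IN_DELETE | IN_MASK_ADD);

	/* wd1 == wd2; effective mask is now IN_CREATE | IN_DELETE */
	(void)wd1;
	(void)wd2;
}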
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
unsigned int grp_num;
/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags) SYSCALL_DEFINE1(inotify_init1, int, flags)
{ {
struct inotify_device *dev; struct fsnotify_group *group;
struct inotify_handle *ih;
struct user_struct *user; struct user_struct *user;
struct file *filp; struct file *filp;
int fd, ret; int fd, ret;
...@@ -621,45 +591,27 @@ SYSCALL_DEFINE1(inotify_init1, int, flags) ...@@ -621,45 +591,27 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
goto out_free_uid; goto out_free_uid;
} }
dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
if (unlikely(!dev)) { group = inotify_new_group(user, inotify_max_queued_events);
ret = -ENOMEM; if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_free_uid; goto out_free_uid;
} }
ih = inotify_init(&inotify_user_ops);
if (IS_ERR(ih)) {
ret = PTR_ERR(ih);
goto out_free_dev;
}
dev->ih = ih;
dev->fa = NULL;
filp->f_op = &inotify_fops; filp->f_op = &inotify_fops;
filp->f_path.mnt = mntget(inotify_mnt); filp->f_path.mnt = mntget(inotify_mnt);
filp->f_path.dentry = dget(inotify_mnt->mnt_root); filp->f_path.dentry = dget(inotify_mnt->mnt_root);
filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
filp->f_mode = FMODE_READ; filp->f_mode = FMODE_READ;
filp->f_flags = O_RDONLY | (flags & O_NONBLOCK); filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
filp->private_data = dev; filp->private_data = group;
INIT_LIST_HEAD(&dev->events);
init_waitqueue_head(&dev->wq);
mutex_init(&dev->ev_mutex);
mutex_init(&dev->up_mutex);
dev->event_count = 0;
dev->queue_size = 0;
dev->max_events = inotify_max_queued_events;
dev->user = user;
atomic_set(&dev->count, 0);
get_inotify_dev(dev);
atomic_inc(&user->inotify_devs); atomic_inc(&user->inotify_devs);
fd_install(fd, filp); fd_install(fd, filp);
return fd; return fd;
out_free_dev:
kfree(dev);
out_free_uid: out_free_uid:
free_uid(user); free_uid(user);
put_filp(filp); put_filp(filp);
...@@ -676,8 +628,8 @@ SYSCALL_DEFINE0(inotify_init) ...@@ -676,8 +628,8 @@ SYSCALL_DEFINE0(inotify_init)
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
u32, mask) u32, mask)
{ {
struct fsnotify_group *group;
struct inode *inode; struct inode *inode;
struct inotify_device *dev;
struct path path; struct path path;
struct file *filp; struct file *filp;
int ret, fput_needed; int ret, fput_needed;
...@@ -698,20 +650,20 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, ...@@ -698,20 +650,20 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
if (mask & IN_ONLYDIR) if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY; flags |= LOOKUP_DIRECTORY;
ret = find_inode(pathname, &path, flags); ret = inotify_find_inode(pathname, &path, flags);
if (unlikely(ret)) if (ret)
goto fput_and_out; goto fput_and_out;
/* inode held in place by reference to path; dev by fget on fd */ /* inode held in place by reference to path; group by fget on fd */
inode = path.dentry->d_inode; inode = path.dentry->d_inode;
dev = filp->private_data; group = filp->private_data;
mutex_lock(&dev->up_mutex); /* create/update an inode mark */
ret = inotify_find_update_watch(dev->ih, inode, mask); ret = inotify_update_watch(group, inode, mask);
if (ret == -ENOENT) if (unlikely(ret))
ret = create_watch(dev, inode, mask); goto path_put_and_out;
mutex_unlock(&dev->up_mutex);
path_put_and_out:
path_put(&path); path_put(&path);
fput_and_out: fput_and_out:
fput_light(filp, fput_needed); fput_light(filp, fput_needed);
...@@ -720,9 +672,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, ...@@ -720,9 +672,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd) SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{ {
struct fsnotify_group *group;
struct fsnotify_mark_entry *entry;
struct file *filp; struct file *filp;
struct inotify_device *dev; int ret = 0, fput_needed;
int ret, fput_needed;
filp = fget_light(fd, &fput_needed); filp = fget_light(fd, &fput_needed);
if (unlikely(!filp)) if (unlikely(!filp))
...@@ -734,10 +687,20 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd) ...@@ -734,10 +687,20 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
goto out; goto out;
} }
dev = filp->private_data; group = filp->private_data;
/* we free our watch data when we get IN_IGNORED */ spin_lock(&group->inotify_data.idr_lock);
ret = inotify_rm_wd(dev->ih, wd); entry = idr_find(&group->inotify_data.idr, wd);
if (unlikely(!entry)) {
spin_unlock(&group->inotify_data.idr_lock);
ret = -EINVAL;
goto out;
}
fsnotify_get_mark(entry);
spin_unlock(&group->inotify_data.idr_lock);
inotify_destroy_mark_entry(entry, group);
fsnotify_put_mark(entry);
out: out:
fput_light(filp, fput_needed); fput_light(filp, fput_needed);
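With removal routed through inotify_destroy_mark_entry(), the IN_IGNORED event is still queued for the wd; a userspace sketch of observing it (other queued events may arrive first, so a real consumer would loop):

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

/* After inotify_rm_watch() the kernel queues IN_IGNORED for the wd
 * (sent by inotify_destroy_mark_entry() above). */
static void remove_and_confirm(int fd, int wd)
{
	char buf[4096];
	ssize_t n;

	inotify_rm_watch(fd, wd);
	n = read(fd, buf, sizeof(buf));
	if (n >= (ssize_t)sizeof(struct inotify_event)) {
		struct inotify_event *ev = (struct inotify_event *)buf;

		if (ev->wd == wd && (ev->mask & IN_IGNORED))
			printf("wd %d ignored\n", wd);
	}
}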
...@@ -753,9 +716,9 @@ inotify_get_sb(struct file_system_type *fs_type, int flags, ...@@ -753,9 +716,9 @@ inotify_get_sb(struct file_system_type *fs_type, int flags,
} }
static struct file_system_type inotify_fs_type = { static struct file_system_type inotify_fs_type = {
.name = "inotifyfs", .name = "inotifyfs",
.get_sb = inotify_get_sb, .get_sb = inotify_get_sb,
.kill_sb = kill_anon_super, .kill_sb = kill_anon_super,
}; };
/* /*
...@@ -775,18 +738,16 @@ static int __init inotify_user_setup(void) ...@@ -775,18 +738,16 @@ static int __init inotify_user_setup(void)
if (IS_ERR(inotify_mnt)) if (IS_ERR(inotify_mnt))
panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
if (!inotify_ignored_event)
panic("unable to allocate the inotify ignored event\n");
inotify_max_queued_events = 16384; inotify_max_queued_events = 16384;
inotify_max_user_instances = 128; inotify_max_user_instances = 128;
inotify_max_user_watches = 8192; inotify_max_user_watches = 8192;
watch_cachep = kmem_cache_create("inotify_watch_cache",
sizeof(struct inotify_user_watch),
0, SLAB_PANIC, NULL);
event_cachep = kmem_cache_create("inotify_event_cache",
sizeof(struct inotify_kernel_event),
0, SLAB_PANIC, NULL);
return 0; return 0;
} }
module_init(inotify_user_setup); module_init(inotify_user_setup);
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened. When inotify gets an event it will need to add that
 * event to the group notify queue. Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue. This event_holder
 * has a pointer back to the original event. Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event. This means we have a single allocation instead
 * of always needing two. If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */
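A hedged sketch of the embedded-holder pattern the comment describes, with hypothetical names (struct holder, struct event, holder_cachep) and the list/slab plumbing assumed from <linux/list.h> and <linux/slab.h>; this is not the patch's code:

/* Hypothetical sketch, not the patch's code: one holder lives inside
 * each event for the common single-queue case; only when the event is
 * already queued elsewhere does a second holder get allocated. */
struct holder {
	struct event *event;
	struct list_head event_list;	/* links the holder into a queue */
};

struct event {
	struct holder holder;		/* embedded; free iff event_list empty */
	/* ... event payload ... */
};

static struct holder *holder_for(struct event *e)
{
	if (list_empty(&e->holder.event_list))
		return &e->holder;	/* common case: zero extra allocations */
	return kmem_cache_alloc(holder_cachep, GFP_KERNEL);	/* rare case */
}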
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
static struct kmem_cache *fsnotify_event_cachep;
static struct kmem_cache *fsnotify_event_holder_cachep;
/*
 * This is a magic event we send when the queue is too full. Since it doesn't
 * hold real event information we just keep one system wide and use it any time
 * it is needed. Its refcnt is set to 1 at kernel init time and will never
 * get set to 0 so it will never get 'freed'
 */
static struct fsnotify_event q_overflow_event;
static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
/**
* fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
* Called from fsnotify_move, which is inlined into filesystem modules.
*/
u32 fsnotify_get_cookie(void)
{
return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
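The cookie is what lets a consumer stitch the two halves of a rename back together; from userspace (sketch, watches already set on the source and destination directories):

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

/* A rename surfaces as an IN_MOVED_FROM / IN_MOVED_TO pair sharing the
 * cookie minted by fsnotify_get_cookie() above. */
static void show_move_pairs(int fd)
{
	char buf[4096];
	ssize_t n = read(fd, buf, sizeof(buf));
	char *p = buf;

	while (n > 0 && p < buf + n) {
		struct inotify_event *ev = (struct inotify_event *)p;

		if (ev->mask & (IN_MOVED_FROM | IN_MOVED_TO))
			printf("%s cookie=%u name=%s\n",
			       (ev->mask & IN_MOVED_FROM) ? "from" : "to",
			       ev->cookie, ev->len ? ev->name : "");
		p += sizeof(struct inotify_event) + ev->len;
	}
}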
/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
BUG_ON(!mutex_is_locked(&group->notification_mutex));
return list_empty(&group->notification_list) ? true : false;
}
void fsnotify_get_event(struct fsnotify_event *event)
{
atomic_inc(&event->refcnt);
}
void fsnotify_put_event(struct fsnotify_event *event)
{
if (!event)
return;
if (atomic_dec_and_test(&event->refcnt)) {
if (event->data_type == FSNOTIFY_EVENT_PATH)
path_put(&event->path);
BUG_ON(!list_empty(&event->private_data_list));
kfree(event->file_name);
kmem_cache_free(fsnotify_event_cachep, event);
}
}
struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
{
return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL);
}
void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
kmem_cache_free(fsnotify_event_holder_cachep, holder);
}
/*
* Find the private data that the group previously attached to this event when
* the group added the event to the notification queue (fsnotify_add_notify_event)
*/
struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
struct fsnotify_event_private_data *lpriv;
struct fsnotify_event_private_data *priv = NULL;
assert_spin_locked(&event->lock);
list_for_each_entry(lpriv, &event->private_data_list, event_list) {
if (lpriv->group == group) {
priv = lpriv;
list_del(&priv->event_list);
break;
}
}
return priv;
}
/*
 * Check if 2 events contain the same information. We do not compare private data
 * but at this moment that isn't a problem for any known fsnotify listeners.
 */
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
if ((old->mask == new->mask) &&
(old->to_tell == new->to_tell) &&
(old->data_type == new->data_type)) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_INODE):
if (old->inode == new->inode)
return true;
break;
case (FSNOTIFY_EVENT_PATH):
if ((old->path.mnt == new->path.mnt) &&
    (old->path.dentry == new->path.dentry))
return true;
/* without this break, events with differing paths would fall
 * through to FSNOTIFY_EVENT_NONE and wrongly compare equal */
break;
case (FSNOTIFY_EVENT_NONE):
return true;
};
}
return false;
}
/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. If the event is successfully added to the
* group's notification queue, a reference is taken on event.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv)
{
struct fsnotify_event_holder *holder = NULL;
struct list_head *list = &group->notification_list;
struct fsnotify_event_holder *last_holder;
struct fsnotify_event *last_event;
/* easy to tell if priv was attached to the event */
INIT_LIST_HEAD(&priv->event_list);
/*
 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
 * Check if we expect to be able to use that holder. If not, alloc a new
 * holder.
 * For the overflow event it's possible that something will use the in-event
 * holder before we get the lock, so we may need to jump back and alloc a new
 * holder; this can't happen for most events...
 */
if (!list_empty(&event->holder.event_list)) {
alloc_holder:
holder = fsnotify_alloc_event_holder();
if (!holder)
return -ENOMEM;
}
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
event = &q_overflow_event;
/* sorry, no private data on the overflow event */
priv = NULL;
}
spin_lock(&event->lock);
if (list_empty(&event->holder.event_list)) {
if (unlikely(holder))
fsnotify_destroy_event_holder(holder);
holder = &event->holder;
} else if (unlikely(!holder)) {
/* between the time we checked above and got the lock the in-event
 * holder was used, go back and get a new one */
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
goto alloc_holder;
}
if (!list_empty(list)) {
last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
last_event = last_holder->event;
if (event_compare(last_event, event)) {
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
if (holder != &event->holder)
fsnotify_destroy_event_holder(holder);
return -EEXIST;
}
}
group->q_len++;
holder->event = event;
fsnotify_get_event(event);
list_add_tail(&holder->event_list, list);
if (priv)
list_add_tail(&priv->event_list, &event->private_data_list);
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
return 0;
}
/*
* Remove and return the first event from the notification list. There is a
* reference held on this event since it was on the list. It is the responsibility
* of the caller to drop this reference.
*/
struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
{
struct fsnotify_event *event;
struct fsnotify_event_holder *holder;
BUG_ON(!mutex_is_locked(&group->notification_mutex));
holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
event = holder->event;
spin_lock(&event->lock);
holder->event = NULL;
list_del_init(&holder->event_list);
spin_unlock(&event->lock);
/* holder == &event->holder means we are referenced through the embedded event holder */
if (holder != &event->holder)
fsnotify_destroy_event_holder(holder);
group->q_len--;
return event;
}
/*
* This will not remove the event; that must be done with fsnotify_remove_notify_event()
*/
struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
{
struct fsnotify_event *event;
struct fsnotify_event_holder *holder;
BUG_ON(!mutex_is_locked(&group->notification_mutex));
holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
event = holder->event;
return event;
}
/*
* Called when a group is being torn down to clean up any outstanding
* event notifications.
*/
void fsnotify_flush_notify(struct fsnotify_group *group)
{
struct fsnotify_event *event;
struct fsnotify_event_private_data *priv;
mutex_lock(&group->notification_mutex);
while (!fsnotify_notify_queue_is_empty(group)) {
event = fsnotify_remove_notify_event(group);
/* if they don't implement free_event_priv they'd better not have attached any */
if (group->ops->free_event_priv) {
spin_lock(&event->lock);
priv = fsnotify_remove_priv_from_event(group, event);
spin_unlock(&event->lock);
if (priv)
group->ops->free_event_priv(priv);
}
fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
}
mutex_unlock(&group->notification_mutex);
}
static void initialize_event(struct fsnotify_event *event)
{
event->holder.event = NULL;
INIT_LIST_HEAD(&event->holder.event_list);
atomic_set(&event->refcnt, 1);
spin_lock_init(&event->lock);
event->path.dentry = NULL;
event->path.mnt = NULL;
event->inode = NULL;
event->data_type = FSNOTIFY_EVENT_NONE;
INIT_LIST_HEAD(&event->private_data_list);
event->to_tell = NULL;
event->file_name = NULL;
event->name_len = 0;
event->sync_cookie = 0;
}
/*
 * fsnotify_create_event - Allocate a new event which will be sent to each
 * group's handle_event function if the group was interested in this
 * particular event.
 *
 * @to_tell the inode which is supposed to receive the event (sometimes a
 * parent of the inode to which the event happened)
 * @mask what actually happened.
 * @data pointer to the object which was actually affected
 * @data_type flag indicating whether the data is a file, path, inode, nothing...
 * @name the filename, if available
 * @cookie the sync cookie used to pair related events (such as the two halves
 * of a move)
 */
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
int data_type, const char *name, u32 cookie)
{
struct fsnotify_event *event;
event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
if (!event)
return NULL;
initialize_event(event);
if (name) {
event->file_name = kstrdup(name, GFP_KERNEL);
if (!event->file_name) {
kmem_cache_free(fsnotify_event_cachep, event);
return NULL;
}
event->name_len = strlen(event->file_name);
}
event->sync_cookie = cookie;
event->to_tell = to_tell;
switch (data_type) {
case FSNOTIFY_EVENT_FILE: {
struct file *file = data;
struct path *path = &file->f_path;
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_PATH: {
struct path *path = data;
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_INODE:
event->inode = data;
event->data_type = FSNOTIFY_EVENT_INODE;
break;
case FSNOTIFY_EVENT_NONE:
event->inode = NULL;
event->path.dentry = NULL;
event->path.mnt = NULL;
break;
default:
BUG();
}
event->mask = mask;
return event;
}
__init int fsnotify_notification_init(void)
{
fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
initialize_event(&q_overflow_event);
q_overflow_event.mask = FS_Q_OVERFLOW;
return 0;
}
subsys_initcall(fsnotify_notification_init);
...@@ -180,10 +180,12 @@ d_iput: no no no yes ...@@ -180,10 +180,12 @@ d_iput: no no no yes
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */ #define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
#define DCACHE_UNHASHED 0x0010 #define DCACHE_UNHASHED 0x0010
#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */ #define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */
#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ #define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */
extern spinlock_t dcache_lock; extern spinlock_t dcache_lock;
extern seqlock_t rename_lock; extern seqlock_t rename_lock;
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
struct dnotify_struct { struct dnotify_struct {
struct dnotify_struct * dn_next; struct dnotify_struct * dn_next;
unsigned long dn_mask; __u32 dn_mask;
int dn_fd; int dn_fd;
struct file * dn_filp; struct file * dn_filp;
fl_owner_t dn_owner; fl_owner_t dn_owner;
...@@ -21,23 +21,18 @@ struct dnotify_struct { ...@@ -21,23 +21,18 @@ struct dnotify_struct {
#ifdef CONFIG_DNOTIFY #ifdef CONFIG_DNOTIFY
extern void __inode_dir_notify(struct inode *, unsigned long); #define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
FS_CREATE | FS_DN_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
extern void dnotify_flush(struct file *, fl_owner_t); extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long); extern int fcntl_dirnotify(int, struct file *, unsigned long);
extern void dnotify_parent(struct dentry *, unsigned long);
static inline void inode_dir_notify(struct inode *inode, unsigned long event)
{
if (inode->i_dnotify_mask & (event))
__inode_dir_notify(inode, event);
}
#else #else
static inline void __inode_dir_notify(struct inode *inode, unsigned long event)
{
}
static inline void dnotify_flush(struct file *filp, fl_owner_t id) static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{ {
} }
...@@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) ...@@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
return -EINVAL; return -EINVAL;
} }
static inline void dnotify_parent(struct dentry *dentry, unsigned long event)
{
}
static inline void inode_dir_notify(struct inode *inode, unsigned long event)
{
}
#endif /* CONFIG_DNOTIFY */ #endif /* CONFIG_DNOTIFY */
#endif /* __KERNEL __ */ #endif /* __KERNEL __ */
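DNOTIFY_ALL_EVENTS above is the full set an application can request; the classic fcntl(2) usage that the fsnotify-backed dnotify must keep working looks like this (sketch):

#define _GNU_SOURCE		/* F_NOTIFY and the DN_* flags */
#include <fcntl.h>
#include <stdio.h>

/* Arm dnotify on a directory; events are delivered via SIGIO.
 * DN_MULTISHOT keeps the notification armed after the first event. */
static int watch_dir_dnotify(const char *dir)
{
	int fd = open(dir, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (fcntl(fd, F_NOTIFY,
		  DN_CREATE | DN_DELETE | DN_RENAME | DN_MULTISHOT) < 0) {
		perror("fcntl(F_NOTIFY)");
		return -1;
	}
	return fd;
}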
......
...@@ -755,9 +755,9 @@ struct inode { ...@@ -755,9 +755,9 @@ struct inode {
__u32 i_generation; __u32 i_generation;
#ifdef CONFIG_DNOTIFY #ifdef CONFIG_FSNOTIFY
unsigned long i_dnotify_mask; /* Directory notify events */ __u32 i_fsnotify_mask; /* all events this inode cares about */
struct dnotify_struct *i_dnotify; /* for directory notifications */ struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */
#endif #endif
#ifdef CONFIG_INOTIFY #ifdef CONFIG_INOTIFY
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/dnotify.h> #include <linux/dnotify.h>
#include <linux/inotify.h> #include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
#include <linux/audit.h> #include <linux/audit.h>
/* /*
...@@ -22,18 +23,44 @@ ...@@ -22,18 +23,44 @@
static inline void fsnotify_d_instantiate(struct dentry *entry, static inline void fsnotify_d_instantiate(struct dentry *entry,
struct inode *inode) struct inode *inode)
{ {
__fsnotify_d_instantiate(entry, inode);
inotify_d_instantiate(entry, inode); inotify_d_instantiate(entry, inode);
} }
/* Notify this dentry's parent about a child's events. */
static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
{
__fsnotify_parent(dentry, mask);
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
}
/* /*
* fsnotify_d_move - entry has been moved * fsnotify_d_move - entry has been moved
* Called with dcache_lock and entry->d_lock held. * Called with dcache_lock and entry->d_lock held.
*/ */
static inline void fsnotify_d_move(struct dentry *entry) static inline void fsnotify_d_move(struct dentry *entry)
{ {
/*
* On move we need to update entry->d_flags to indicate if the new parent
* cares about events from this entry.
*/
__fsnotify_update_dcache_flags(entry);
inotify_d_move(entry); inotify_d_move(entry);
} }
/*
* fsnotify_link_count - inode's link count changed
*/
static inline void fsnotify_link_count(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/* /*
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/ */
...@@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, ...@@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
int isdir, struct inode *target, struct dentry *moved) int isdir, struct inode *target, struct dentry *moved)
{ {
struct inode *source = moved->d_inode; struct inode *source = moved->d_inode;
u32 cookie = inotify_get_cookie(); u32 in_cookie = inotify_get_cookie();
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
if (old_dir == new_dir) if (old_dir == new_dir)
inode_dir_notify(old_dir, DN_RENAME); old_dir_mask |= FS_DN_RENAME;
else {
inode_dir_notify(old_dir, DN_DELETE);
inode_dir_notify(new_dir, DN_CREATE);
}
if (isdir) if (isdir) {
isdir = IN_ISDIR; isdir = IN_ISDIR;
inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name, old_dir_mask |= FS_IN_ISDIR;
new_dir_mask |= FS_IN_ISDIR;
}
inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
source); source);
inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name, inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
source); source);
fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
if (target) { if (target) {
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(target); inotify_inode_is_dead(target);
/* this is really a link_count change not a removal */
fsnotify_link_count(target);
} }
if (source) { if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
} }
audit_inode_child(new_name, moved, new_dir); audit_inode_child(new_name, moved, new_dir);
} }
/*
 * fsnotify_inode_delete - an inode is being evicted from cache, clean up is needed
 */
static inline void fsnotify_inode_delete(struct inode *inode)
{
__fsnotify_inode_delete(inode);
}
/* /*
* fsnotify_nameremove - a filename was removed from a directory * fsnotify_nameremove - a filename was removed from a directory
*/ */
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
{ {
__u32 mask = FS_DELETE;
if (isdir) if (isdir)
isdir = IN_ISDIR; mask |= FS_IN_ISDIR;
dnotify_parent(dentry, DN_DELETE);
inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name); fsnotify_parent(dentry, mask);
} }
/* /*
...@@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode) ...@@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode)
{ {
inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(inode); inotify_inode_is_dead(inode);
}
/* fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
* fsnotify_link_count - inode's link count changed __fsnotify_inode_delete(inode);
*/
static inline void fsnotify_link_count(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
} }
/* /*
...@@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode) ...@@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode)
*/ */
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{ {
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode); dentry->d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode); audit_inode_child(dentry->d_name.name, dentry, inode);
fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
} }
/* /*
...@@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) ...@@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
*/ */
 static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
 {
-	inode_dir_notify(dir, DN_CREATE);
 	inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
 				  inode);
 	fsnotify_link_count(inode);
 	audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
+
+	fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
 }
 
 /*
@@ -127,10 +171,13 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
  */
 static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
 {
-	inode_dir_notify(inode, DN_CREATE);
-	inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
-				  dentry->d_name.name, dentry->d_inode);
+	__u32 mask = (FS_CREATE | FS_IN_ISDIR);
+	struct inode *d_inode = dentry->d_inode;
+
+	inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
 	audit_inode_child(dentry->d_name.name, dentry, inode);
+
+	fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
 }
 
 /*
@@ -139,14 +186,15 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
  */
 static inline void fsnotify_access(struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
-	u32 mask = IN_ACCESS;
+	__u32 mask = FS_ACCESS;
 
 	if (S_ISDIR(inode->i_mode))
-		mask |= IN_ISDIR;
+		mask |= FS_IN_ISDIR;
 
-	dnotify_parent(dentry, DN_ACCESS);
-	inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
 	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+	fsnotify_parent(dentry, mask);
+	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
 
 /*
@@ -155,14 +203,15 @@ static inline void fsnotify_access(struct dentry *dentry)
  */
 static inline void fsnotify_modify(struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
-	u32 mask = IN_MODIFY;
+	__u32 mask = FS_MODIFY;
 
 	if (S_ISDIR(inode->i_mode))
-		mask |= IN_ISDIR;
+		mask |= FS_IN_ISDIR;
 
-	dnotify_parent(dentry, DN_MODIFY);
-	inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
 	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+	fsnotify_parent(dentry, mask);
+	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
 
 /*
@@ -171,13 +220,15 @@ static inline void fsnotify_modify(struct dentry *dentry)
  */
 static inline void fsnotify_open(struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
-	u32 mask = IN_OPEN;
+	__u32 mask = FS_OPEN;
 
 	if (S_ISDIR(inode->i_mode))
-		mask |= IN_ISDIR;
+		mask |= FS_IN_ISDIR;
 
-	inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
 	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+	fsnotify_parent(dentry, mask);
+	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
 
 /*
@@ -187,15 +238,16 @@ static inline void fsnotify_close(struct file *file)
 {
 	struct dentry *dentry = file->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
-	const char *name = dentry->d_name.name;
 	fmode_t mode = file->f_mode;
-	u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
+	__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
 
 	if (S_ISDIR(inode->i_mode))
-		mask |= IN_ISDIR;
+		mask |= FS_IN_ISDIR;
 
-	inotify_dentry_parent_queue_event(dentry, mask, 0, name);
 	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+	fsnotify_parent(dentry, mask);
+	fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
 }
 
 /*
@@ -204,13 +256,15 @@ static inline void fsnotify_close(struct file *file)
  */
 static inline void fsnotify_xattr(struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
-	u32 mask = IN_ATTRIB;
+	__u32 mask = FS_ATTRIB;
 
 	if (S_ISDIR(inode->i_mode))
-		mask |= IN_ISDIR;
+		mask |= FS_IN_ISDIR;
 
-	inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
 	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+	fsnotify_parent(dentry, mask);
+	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
 
 /*
@@ -220,50 +274,37 @@ static inline void fsnotify_xattr(struct dentry *dentry)
  */
 static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
 {
 	struct inode *inode = dentry->d_inode;
-	int dn_mask = 0;
-	u32 in_mask = 0;
+	__u32 mask = 0;
+
+	if (ia_valid & ATTR_UID)
+		mask |= FS_ATTRIB;
+	if (ia_valid & ATTR_GID)
+		mask |= FS_ATTRIB;
+	if (ia_valid & ATTR_SIZE)
+		mask |= FS_MODIFY;
 
-	if (ia_valid & ATTR_UID) {
-		in_mask |= IN_ATTRIB;
-		dn_mask |= DN_ATTRIB;
-	}
-	if (ia_valid & ATTR_GID) {
-		in_mask |= IN_ATTRIB;
-		dn_mask |= DN_ATTRIB;
-	}
-	if (ia_valid & ATTR_SIZE) {
-		in_mask |= IN_MODIFY;
-		dn_mask |= DN_MODIFY;
-	}
 	/* both times implies a utime(s) call */
-	if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) {
-		in_mask |= IN_ATTRIB;
-		dn_mask |= DN_ATTRIB;
-	} else if (ia_valid & ATTR_ATIME) {
-		in_mask |= IN_ACCESS;
-		dn_mask |= DN_ACCESS;
-	} else if (ia_valid & ATTR_MTIME) {
-		in_mask |= IN_MODIFY;
-		dn_mask |= DN_MODIFY;
-	}
-	if (ia_valid & ATTR_MODE) {
-		in_mask |= IN_ATTRIB;
-		dn_mask |= DN_ATTRIB;
-	}
+	if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME))
+		mask |= FS_ATTRIB;
+	else if (ia_valid & ATTR_ATIME)
+		mask |= FS_ACCESS;
+	else if (ia_valid & ATTR_MTIME)
+		mask |= FS_MODIFY;
+
+	if (ia_valid & ATTR_MODE)
+		mask |= FS_ATTRIB;
 
-	if (dn_mask)
-		dnotify_parent(dentry, dn_mask);
-	if (in_mask) {
+	if (mask) {
 		if (S_ISDIR(inode->i_mode))
-			in_mask |= IN_ISDIR;
-		inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL);
-		inotify_dentry_parent_queue_event(dentry, in_mask, 0,
-						  dentry->d_name.name);
+			mask |= FS_IN_ISDIR;
+		inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+		fsnotify_parent(dentry, mask);
+		fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 	}
 }
 
-#ifdef CONFIG_INOTIFY	/* inotify helpers */
+#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY)	/* notify helpers */
 
 /*
  * fsnotify_oldname_init - save off the old filename before we change it
@@ -281,7 +322,7 @@ static inline void fsnotify_oldname_free(const char *old_name)
 	kfree(old_name);
 }
 
-#else	/* CONFIG_INOTIFY */
+#else	/* CONFIG_INOTIFY || CONFIG_FSNOTIFY */
 
 static inline const char *fsnotify_oldname_init(const char *name)
 {
......
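Every helper above follows the same shape: compute an FS_* mask, keep the legacy inotify call for now, then notify anyone watching the parent directory and the inode itself. A minimal sketch of that pattern (fsnotify_example is invented here for illustration; it is not part of this commit):

static inline void fsnotify_example(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	__u32 mask = FS_ACCESS;		/* assumed event type for the sketch */

	if (S_ISDIR(inode->i_mode))
		mask |= FS_IN_ISDIR;

	/* deliver to marks on the parent directory, then to the inode */
	fsnotify_parent(dentry, mask);
	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}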
/*
* Filesystem access notification for Linux
*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*/
#ifndef __LINUX_FSNOTIFY_BACKEND_H
#define __LINUX_FSNOTIFY_BACKEND_H
#ifdef __KERNEL__
#include <linux/idr.h> /* inotify uses this */
#include <linux/fs.h> /* struct inode */
#include <linux/list.h>
#include <linux/path.h> /* struct path */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/atomic.h>
/*
 * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
 * convert between them.  dnotify only needs conversion at watch creation
 * so no perf loss there.  fanotify isn't defined yet, so it can use the
 * holes if it needs more events.
*/
#define FS_ACCESS 0x00000001 /* File was accessed */
#define FS_MODIFY 0x00000002 /* File was modified */
#define FS_ATTRIB 0x00000004 /* Metadata changed */
#define FS_CLOSE_WRITE		0x00000008	/* Writable file was closed */
#define FS_CLOSE_NOWRITE	0x00000010	/* Unwritable file closed */
#define FS_OPEN 0x00000020 /* File was opened */
#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
#define FS_CREATE 0x00000100 /* Subfile was created */
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */
#define FS_IN_ONESHOT 0x80000000 /* only send event once */
#define FS_DN_RENAME 0x10000000 /* file renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
/* This inode cares about things that happen to its children. Always set for
* dnotify and inotify. */
#define FS_EVENT_ON_CHILD 0x08000000
/* This is a list of all events that may get sent to a parent based on fs
 * events happening to inodes inside that directory */
#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
FS_DELETE)
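Because the bit values line up, converting an inotify mask to an fsnotify mask needs no lookup table; a trivial illustration (not a function from this commit):

static inline __u32 example_in_to_fs_mask(u32 in_mask)
{
	/* IN_ACCESS == FS_ACCESS == 0x00000001, and so on down the list */
	return (__u32)in_mask;
}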
/* listeners that hard code group numbers near the top */
#define DNOTIFY_GROUP_NUM UINT_MAX
#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark_entry;
struct fsnotify_event_private_data;
/*
 * Each group must define these ops.  The fsnotify infrastructure will call
* these operations for each relevant group.
*
* should_send_event - given a group, inode, and mask this function determines
* if the group is interested in this event.
* handle_event - main call for a group to handle an fs event
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
 * freeing_mark - this means that a mark has been flagged to die when everything
* finishes using it. The function is supplied with what must be a
* valid group and inode to use to clean up.
*/
struct fsnotify_ops {
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
void (*free_event_priv)(struct fsnotify_event_private_data *priv);
};
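For illustration, a hypothetical backend could fill in these ops as follows (the example_* names are invented; freeing_mark may be left NULL, per the merged patches):

static bool example_should_send_event(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask)
{
	/* interested iff the group mask overlaps the event mask */
	return (group->mask & mask);
}

static int example_handle_event(struct fsnotify_group *group,
				struct fsnotify_event *event)
{
	/* queue the event for later delivery to userspace */
	return fsnotify_add_notify_event(group, event, NULL);
}

static const struct fsnotify_ops example_fsnotify_ops = {
	.should_send_event	= example_should_send_event,
	.handle_event		= example_handle_event,
	.free_group_priv	= NULL,
	.freeing_mark		= NULL,		/* allowed to be NULL */
	.free_event_priv	= NULL,
};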
/*
* A group is a "thing" that wants to receive notification about filesystem
* events. The mask holds the subset of event types this group cares about.
 * The refcnt on a group is up to the implementor; at any moment, if it hits 0,
 * everything will be cleaned up.
*/
struct fsnotify_group {
/*
* global list of all groups receiving events from fsnotify.
* anchored by fsnotify_groups and protected by either fsnotify_grp_mutex
* or fsnotify_grp_srcu depending on write vs read.
*/
struct list_head group_list;
/*
* Defines all of the event types in which this group is interested.
* This mask is a bitwise OR of the FS_* events from above. Each time
* this mask changes for a group (if it changes) the correct functions
* must be called to update the global structures which indicate global
* interest in event types.
*/
__u32 mask;
/*
* How the refcnt is used is up to each group. When the refcnt hits 0
* fsnotify will clean up all of the resources associated with this group.
* As an example, the dnotify group will always have a refcnt=1 and that
* will never change. Inotify, on the other hand, has a group per
* inotify_init() and the refcnt will hit 0 only when that fd has been
* closed.
*/
atomic_t refcnt; /* things with interest in this group */
unsigned int group_num; /* simply prevents accidental group collision */
const struct fsnotify_ops *ops; /* how this group handles things */
/* needed to send notification to userspace */
struct mutex notification_mutex; /* protect the notification_list */
struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
	/* stores all fastpath entries associated with this group so they can be cleaned up on unregister */
spinlock_t mark_lock; /* protect mark_entries list */
atomic_t num_marks; /* 1 for each mark entry and 1 for not being
* past the point of no return when freeing
* a group */
struct list_head mark_entries; /* all inode mark entries for this group */
/* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */
bool on_group_list;
/* groups can define private fields here or use the void *private */
union {
void *private;
#ifdef CONFIG_INOTIFY_USER
struct inotify_group_private_data {
spinlock_t idr_lock;
struct idr idr;
u32 last_wd;
struct fasync_struct *fa; /* async notification */
struct user_struct *user;
} inotify_data;
#endif
};
};
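Such a backend would then obtain its group with fsnotify_obtain_group(), declared further down; a sketch assuming an invented group number one below inotify's and assuming failure is reported via ERR_PTR() (<linux/err.h>), as the in-tree callers check:

#define EXAMPLE_GROUP_NUM	(INOTIFY_GROUP_NUM - 1)	/* invented for this sketch */

static struct fsnotify_group *example_group;

static int __init example_setup(void)
{
	example_group = fsnotify_obtain_group(EXAMPLE_GROUP_NUM,
					      FS_CREATE | FS_DELETE,
					      &example_fsnotify_ops);
	if (IS_ERR(example_group))
		return PTR_ERR(example_group);
	return 0;
}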
/*
* A single event can be queued in multiple group->notification_lists.
*
 * Each group->notification_list will point to an event_holder which in turn
 * points to the actual event that needs to be sent to userspace.
 *
 * It seemed cheaper to create a refcnt'd event and a small holder for every
 * group than to create a different event for every group.
*
*/
struct fsnotify_event_holder {
struct fsnotify_event *event;
struct list_head event_list;
};
/*
* Inotify needs to tack data onto an event. This struct lets us later find the
* correct private data of the correct group.
*/
struct fsnotify_event_private_data {
struct fsnotify_group *group;
struct list_head event_list;
};
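A group typically embeds this struct in a larger, group-specific one and recovers its own data with container_of(); inotify, for example, pairs it with a watch descriptor. A sketch with an invented wrapper:

struct example_event_priv {
	struct fsnotify_event_private_data fsnotify_data;
	int wd;			/* e.g. an inotify-style watch descriptor */
};

static inline struct example_event_priv *
example_event_priv(struct fsnotify_event_private_data *fsn)
{
	return container_of(fsn, struct example_event_priv, fsnotify_data);
}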
/*
 * All of the information about the original object that we want to send to
 * a group.  If you want to carry more info from the accessing task to the
 * listener, this structure is where you need to add fields.
*/
struct fsnotify_event {
/*
	 * If we create an event we are also likely going to need a holder
	 * to link it to a group.  So embed one holder in the event; this means
	 * only one allocation for the common case where there is only one group.
*/
struct fsnotify_event_holder holder;
spinlock_t lock; /* protection for the associated event_holder and private_list */
/* to_tell may ONLY be dereferenced during handle_event(). */
struct inode *to_tell; /* either the inode the event happened to or its parent */
/*
	 * Depending on the event type we should have either a path or an inode.
	 * We hold a reference on the path, but NOT on the inode.  Since we
	 * have the ref on the path, it may be dereferenced at any point during
	 * this object's lifetime.  That reference is dropped when this object's
	 * refcnt hits 0.  If this event contains an inode instead of a path,
	 * the inode may ONLY be used during handle_event().
*/
union {
struct path path;
struct inode *inode;
};
/* when calling fsnotify tell it if the data is a path or inode */
#define FSNOTIFY_EVENT_NONE 0
#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2
#define FSNOTIFY_EVENT_FILE 3
int data_type; /* which of the above union we have */
atomic_t refcnt; /* how many groups still are using/need to send this event */
__u32 mask; /* the type of access, bitwise OR for FS_* event types */
	u32 sync_cookie;	/* used to correlate events, namely inotify mv events */
char *file_name;
size_t name_len;
struct list_head private_data_list; /* groups can store private data here */
};
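Consumers must check data_type before touching the union; a sketch of a hypothetical test a group might run from handle_event():

static bool example_event_has_data(struct fsnotify_event *event)
{
	switch (event->data_type) {
	case FSNOTIFY_EVENT_PATH:
		/* the path is pinned for the event's whole lifetime */
		return event->path.dentry != NULL;
	case FSNOTIFY_EVENT_INODE:
		/* the inode is valid ONLY inside handle_event() */
		return event->inode != NULL;
	default:
		return false;
	}
}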
/*
 * A mark is simply an entry attached to an in-core inode which allows an
 * fsnotify listener to indicate that it is either no longer interested in
 * events of a type matching the mask, or only interested in those events.
*
* these are flushed when an inode is evicted from core and may be flushed
* when the inode is modified (as seen by fsnotify_access). Some fsnotify users
* (such as dnotify) will flush these when the open fd is closed and not at
* inode eviction or modification.
*/
struct fsnotify_mark_entry {
__u32 mask; /* mask this mark entry is for */
	/* We hold a ref for each i_list and g_list, and one ref for each 'thing'
	 * in the kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
struct inode *inode; /* inode this entry is associated with */
struct fsnotify_group *group; /* group this mark entry is for */
struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */
	struct list_head g_list;	/* list of mark_entries by group->mark_entries */
spinlock_t lock; /* protect group, inode, and killme */
struct list_head free_i_list; /* tmp list used when freeing this mark */
struct list_head free_g_list; /* tmp list used when freeing this mark */
void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
};
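Creating and attaching a mark combines fsnotify_init_mark() and fsnotify_add_mark(), declared below; a sketch with invented example_* helpers (kmalloc/kfree assume <linux/slab.h>, and the -EEXIST case is an assumption about duplicate marks):

static void example_free_mark(struct fsnotify_mark_entry *entry)
{
	kfree(entry);
}

static int example_watch_inode(struct fsnotify_group *group,
			       struct inode *inode, __u32 mask)
{
	struct fsnotify_mark_entry *entry;
	int ret;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	fsnotify_init_mark(entry, example_free_mark);
	entry->mask = mask;

	ret = fsnotify_add_mark(entry, group, inode);
	if (!ret)
		fsnotify_recalc_inode_mask(inode);
	else
		fsnotify_put_mark(entry);	/* e.g. a mark already existed */
	return ret;
}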
#ifdef CONFIG_FSNOTIFY
/* called from the vfs helpers */
/* main fsnotify call to send events */
extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie);
extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
/* FS_EVENT_ON_CHILD is set if the inode may care */
if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
 * Update the dentry with a flag indicating its parent's interest in receiving
 * filesystem events when those events happen to this dentry->d_inode.
*/
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{
struct dentry *parent;
assert_spin_locked(&dcache_lock);
assert_spin_locked(&dentry->d_lock);
parent = dentry->d_parent;
if (fsnotify_inode_watches_children(parent->d_inode))
dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
}
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
* Called with dcache_lock held.
*/
static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (!inode)
return;
assert_spin_locked(&dcache_lock);
spin_lock(&dentry->d_lock);
__fsnotify_update_dcache_flags(dentry);
spin_unlock(&dentry->d_lock);
}
/* called from fsnotify listeners, such as fanotify or dnotify */
/* must call when a group changes its ->mask */
extern void fsnotify_recalc_global_mask(void);
/* get a reference to an existing or create a new group */
extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
__u32 mask,
const struct fsnotify_ops *ops);
/* run all marks associated with this group and update group->mask */
extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_obtain_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* take a reference to an event */
extern void fsnotify_get_event(struct fsnotify_event *event);
extern void fsnotify_put_event(struct fsnotify_event *event);
/* find private data previously attached to an event and unlink it */
extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
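A listener's read() path would drain the queue under the group's notification_mutex; a hedged sketch using only the declarations above:

static void example_flush_queue(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	mutex_lock(&group->notification_mutex);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_notify_event(group);
		/* ... copy event->mask and event->file_name to the reader ... */
		fsnotify_put_event(event);	/* drop the queue's reference */
	}
	mutex_unlock(&group->notification_mutex);
}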
/* functions used to manipulate the marks attached to inodes */
/* run all marks associated with an inode and update inode->i_fsnotify_mask */
extern void fsnotify_recalc_inode_mask(struct inode *inode);
extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
/* find (and take a reference) to a mark associated with group and inode */
extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
/* attach the mark to both the group and the inode */
extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
/* given a mark, flag it to be freed when all references are dropped */
extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
/* run all the marks in a group, and flag them to be freed */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
void *data, int data_is, const char *name,
u32 cookie);
#else
static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie)
{}
static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
{}
static inline void __fsnotify_inode_delete(struct inode *inode)
{}
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}
static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
{}
static inline u32 fsnotify_get_cookie(void)
{
return 0;
}
static inline void fsnotify_unmount_inodes(struct list_head *list)
{}
#endif /* CONFIG_FSNOTIFY */
#endif	/* __KERNEL__ */
#endif /* __LINUX_FSNOTIFY_BACKEND_H */
@@ -302,7 +302,8 @@ config AUDITSYSCALL
 
 config AUDIT_TREE
 	def_bool y
-	depends on AUDITSYSCALL && INOTIFY
+	depends on AUDITSYSCALL
+	select INOTIFY
 
 menu "RCU Subsystem"
......