Commit 0fb7b6f9 authored by Peter Zijlstra

Merge branch 'driver-core/driver-core-next'

Pull in dependent cgroup patches
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
parents 7e18e42e 61742a7c
@@ -310,7 +310,6 @@ IOMAP
   devm_ioremap()
   devm_ioremap_uc()
   devm_ioremap_wc()
-  devm_ioremap_np()
   devm_ioremap_resource() : checks resource, requests memory region, ioremaps
   devm_ioremap_resource_wc()
   devm_platform_ioremap_resource() : calls devm_ioremap_resource() for platform device
...
@@ -260,7 +260,7 @@ EXPORT_SYMBOL_GPL(__class_create);
  */
 void class_destroy(struct class *cls)
 {
-        if ((cls == NULL) || (IS_ERR(cls)))
+        if (IS_ERR_OR_NULL(cls))
                 return;
         class_unregister(cls);
...
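The class_destroy() hunk above folds the separate NULL and IS_ERR() tests into a single IS_ERR_OR_NULL() check. A minimal userspace sketch of the same idiom; the ERR_PTR helpers are re-implemented here for illustration rather than taken from kernel headers:

#include <stdio.h>

/* Userspace re-implementation of the kernel's ERR_PTR helpers, illustration only. */
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
        int obj = 0;
        void *ok = &obj, *null = NULL, *err = ERR_PTR(-12); /* -ENOMEM */

        /* One test now covers both early-return cases of the old code. */
        printf("ok=%d null=%d err=%d\n",
               IS_ERR_OR_NULL(ok), IS_ERR_OR_NULL(null), IS_ERR_OR_NULL(err));
        return 0;
}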
@@ -2509,7 +2509,7 @@ static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
         rc = kobject_synth_uevent(&dev->kobj, buf, count);
         if (rc) {
-                dev_err(dev, "uevent: failed to send synthetic uevent\n");
+                dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
                 return rc;
         }
...
@@ -836,7 +836,7 @@ static int __init save_async_options(char *buf)
         if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
                 pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
-        strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+        strscpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
         async_probe_default = parse_option_str(async_probe_drv_names, "*");
         return 1;
...
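The strlcpy() to strscpy() conversion above changes the return convention: strscpy() reports the number of bytes copied, or -E2BIG on truncation, and never has to walk past the destination size in the source just to compute a length. A rough userspace approximation of those semantics; my_strscpy is a stand-in, not the kernel implementation:

#include <stdio.h>
#include <string.h>

#define E2BIG 7

/*
 * Copy at most size-1 bytes, always NUL-terminate (for size > 0), and
 * return the number of bytes copied or -E2BIG on truncation.
 */
static long my_strscpy(char *dst, const char *src, size_t size)
{
        size_t i;

        if (!size)
                return -E2BIG;

        for (i = 0; i < size - 1 && src[i]; i++)
                dst[i] = src[i];
        dst[i] = '\0';

        return src[i] ? -E2BIG : (long)i;
}

int main(void)
{
        char buf[8];

        printf("%ld \"%s\"\n", my_strscpy(buf, "async", sizeof(buf)), buf);                /* 5 "async" */
        printf("%ld \"%s\"\n", my_strscpy(buf, "driver_async_probe", sizeof(buf)), buf);   /* -7 "driver_" */
        return 0;
}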
@@ -117,7 +117,9 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
         if (unlikely(!dr))
                 return NULL;
-        memset(dr, 0, offsetof(struct devres, data));
+        /* No need to clear memory twice */
+        if (!(gfp & __GFP_ZERO))
+                memset(dr, 0, offsetof(struct devres, data));
         INIT_LIST_HEAD(&dr->node.entry);
         dr->node.release = release;
...
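The alloc_dr() hunk above avoids zeroing the devres header a second time when the caller already requested zeroed memory via __GFP_ZERO (as devm_kzalloc() does). A small userspace sketch of the same pattern; struct record, alloc_record() and MY_GFP_ZERO are stand-ins for struct devres, alloc_dr() and __GFP_ZERO:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

#define MY_GFP_ZERO 0x1u        /* stand-in for the kernel's __GFP_ZERO */

struct record {
        void (*release)(void *);
        char data[];            /* payload follows the header, as in struct devres */
};

/* Only clear the header when the allocator did not already zero the object. */
static struct record *alloc_record(size_t payload, unsigned int flags)
{
        struct record *r;

        r = (flags & MY_GFP_ZERO) ? calloc(1, sizeof(*r) + payload)
                                  : malloc(sizeof(*r) + payload);
        if (!r)
                return NULL;

        if (!(flags & MY_GFP_ZERO))     /* no need to clear memory twice */
                memset(r, 0, offsetof(struct record, data));

        return r;
}

int main(void)
{
        struct record *a = alloc_record(32, 0);
        struct record *b = alloc_record(32, MY_GFP_ZERO);

        printf("%p %p\n", (void *)a, (void *)b);
        free(a);
        free(b);
        return 0;
}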
@@ -472,6 +472,16 @@ static void kernfs_drain(struct kernfs_node *kn)
         lockdep_assert_held_write(&root->kernfs_rwsem);
         WARN_ON_ONCE(kernfs_active(kn));
+        /*
+         * Skip draining if already fully drained. This avoids draining and its
+         * lockdep annotations for nodes which have never been activated,
+         * allowing embedding kernfs_remove() in create error paths without
+         * worrying about draining.
+         */
+        if (atomic_read(&kn->active) == KN_DEACTIVATED_BIAS &&
+            !kernfs_should_drain_open_files(kn))
+                return;
         up_write(&root->kernfs_rwsem);
         if (kernfs_lockdep(kn)) {
@@ -480,7 +490,6 @@ static void kernfs_drain(struct kernfs_node *kn)
                 lock_contended(&kn->dep_map, _RET_IP_);
         }
-        /* but everyone should wait for draining */
         wait_event(root->deactivate_waitq,
                    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
@@ -489,7 +498,8 @@ static void kernfs_drain(struct kernfs_node *kn)
                 rwsem_release(&kn->dep_map, _RET_IP_);
         }
-        kernfs_drain_open_files(kn);
+        if (kernfs_should_drain_open_files(kn))
+                kernfs_drain_open_files(kn);
         down_write(&root->kernfs_rwsem);
 }
@@ -695,13 +705,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
                 goto err_unlock;
         }
-        /*
-         * ACTIVATED is protected with kernfs_mutex but it was clear when
-         * @kn was added to idr and we just wanna see it set. No need to
-         * grab kernfs_mutex.
-         */
-        if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
-                     !atomic_inc_not_zero(&kn->count)))
+        if (unlikely(!kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
                 goto err_unlock;
         spin_unlock(&kernfs_idr_lock);
@@ -743,10 +747,7 @@ int kernfs_add_one(struct kernfs_node *kn)
                 goto out_unlock;
         ret = -ENOENT;
-        if (parent->flags & KERNFS_EMPTY_DIR)
-                goto out_unlock;
-        if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
+        if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
                 goto out_unlock;
         kn->hash = kernfs_name_hash(kn->name, kn->ns);
@@ -1304,6 +1305,21 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
         return pos->parent;
 }
+static void kernfs_activate_one(struct kernfs_node *kn)
+{
+        lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
+        kn->flags |= KERNFS_ACTIVATED;
+        if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
+                return;
+        WARN_ON_ONCE(kn->parent && RB_EMPTY_NODE(&kn->rb));
+        WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+        atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
+}
 /**
  * kernfs_activate - activate a node which started deactivated
  * @kn: kernfs_node whose subtree is to be activated
@@ -1325,15 +1341,42 @@ void kernfs_activate(struct kernfs_node *kn)
         down_write(&root->kernfs_rwsem);
         pos = NULL;
-        while ((pos = kernfs_next_descendant_post(pos, kn))) {
-                if (pos->flags & KERNFS_ACTIVATED)
-                        continue;
-                WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
-                WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
-                atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
-                pos->flags |= KERNFS_ACTIVATED;
+        while ((pos = kernfs_next_descendant_post(pos, kn)))
+                kernfs_activate_one(pos);
+        up_write(&root->kernfs_rwsem);
+}
+/**
+ * kernfs_show - show or hide a node
+ * @kn: kernfs_node to show or hide
+ * @show: whether to show or hide
+ *
+ * If @show is %false, @kn is marked hidden and deactivated. A hidden node is
+ * ignored in future activations. If %true, the mark is removed and activation
+ * state is restored. This function won't implicitly activate a new node in a
+ * %KERNFS_ROOT_CREATE_DEACTIVATED root which hasn't been activated yet.
+ *
+ * To avoid recursion complexities, directories aren't supported for now.
+ */
+void kernfs_show(struct kernfs_node *kn, bool show)
+{
+        struct kernfs_root *root = kernfs_root(kn);
+        if (WARN_ON_ONCE(kernfs_type(kn) == KERNFS_DIR))
+                return;
+        down_write(&root->kernfs_rwsem);
+        if (show) {
+                kn->flags &= ~KERNFS_HIDDEN;
+                if (kn->flags & KERNFS_ACTIVATED)
+                        kernfs_activate_one(kn);
+        } else {
+                kn->flags |= KERNFS_HIDDEN;
+                if (kernfs_active(kn))
+                        atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
+                kernfs_drain(kn);
         }
         up_write(&root->kernfs_rwsem);
@@ -1358,34 +1401,27 @@ static void __kernfs_remove(struct kernfs_node *kn)
         pr_debug("kernfs %s: removing\n", kn->name);
-        /* prevent any new usage under @kn by deactivating all nodes */
+        /* prevent new usage by marking all nodes removing and deactivating */
         pos = NULL;
-        while ((pos = kernfs_next_descendant_post(pos, kn)))
+        while ((pos = kernfs_next_descendant_post(pos, kn))) {
+                pos->flags |= KERNFS_REMOVING;
                 if (kernfs_active(pos))
                         atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
+        }
         /* deactivate and unlink the subtree node-by-node */
         do {
                 pos = kernfs_leftmost_descendant(kn);
                 /*
-                 * kernfs_drain() drops kernfs_rwsem temporarily and @pos's
+                 * kernfs_drain() may drop kernfs_rwsem temporarily and @pos's
                  * base ref could have been put by someone else by the time
                  * the function returns. Make sure it doesn't go away
                  * underneath us.
                  */
                 kernfs_get(pos);
-                /*
-                 * Drain iff @kn was activated. This avoids draining and
-                 * its lockdep annotations for nodes which have never been
-                 * activated and allows embedding kernfs_remove() in create
-                 * error paths without worrying about draining.
-                 */
-                if (kn->flags & KERNFS_ACTIVATED)
-                        kernfs_drain(pos);
-                else
-                        WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+                kernfs_drain(pos);
                 /*
                  * kernfs_unlink_sibling() succeeds once per node. Use it
...
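kernfs_show(), added above, toggles the new KERNFS_HIDDEN flag and reuses kernfs_activate_one() and kernfs_drain() so a file node can be hidden and re-shown without being recreated. A hedged sketch of a kernel-side caller; the helper and the feature flag are hypothetical, only kernfs_show() itself comes from this series:

#include <linux/kernfs.h>

/* Hypothetical helper: hide an existing kernfs file while a feature is off
 * and restore it when the feature is turned back on. @kn is the node that
 * was returned when the file was created. Hiding deactivates and drains the
 * node; showing restores its previous activation state. Directories are not
 * supported by kernfs_show(). */
static void example_sync_visibility(struct kernfs_node *kn, bool feature_enabled)
{
        kernfs_show(kn, feature_enabled);
}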
@@ -23,6 +23,8 @@ struct kernfs_open_node {
         atomic_t event;
         wait_queue_head_t poll;
         struct list_head files; /* goes through kernfs_open_file.list */
+        unsigned int nr_mmapped;
+        unsigned int nr_to_release;
 };
 /*
@@ -57,31 +59,17 @@ static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
 }
 /**
- * kernfs_deref_open_node - Get kernfs_open_node corresponding to @kn.
- *
- * @of: associated kernfs_open_file instance.
- * @kn: target kernfs_node.
- *
- * Fetch and return ->attr.open of @kn if @of->list is non empty.
- * If @of->list is not empty we can safely assume that @of is on
- * @kn->attr.open->files list and this guarantees that @kn->attr.open
- * will not vanish i.e. dereferencing outside RCU read-side critical
- * section is safe here.
- *
- * The caller needs to make sure that @of->list is not empty.
+ * of_on - Return the kernfs_open_node of the specified kernfs_open_file
+ * @of: target kernfs_open_file
  */
-static struct kernfs_open_node *
-kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
+static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
 {
-        struct kernfs_open_node *on;
-        on = rcu_dereference_check(kn->attr.open, !list_empty(&of->list));
-        return on;
+        return rcu_dereference_protected(of->kn->attr.open,
+                                         !list_empty(&of->list));
 }
 /**
- * kernfs_deref_open_node_protected - Get kernfs_open_node corresponding to @kn
+ * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
  *
  * @kn: target kernfs_node.
  *
@@ -96,7 +84,7 @@ kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
  * The caller needs to make sure that kernfs_open_file_mutex is held.
  */
 static struct kernfs_open_node *
-kernfs_deref_open_node_protected(struct kernfs_node *kn)
+kernfs_deref_open_node_locked(struct kernfs_node *kn)
 {
         return rcu_dereference_protected(kn->attr.open,
                                          lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
@@ -207,12 +195,8 @@ static void kernfs_seq_stop(struct seq_file *sf, void *v)
 static int kernfs_seq_show(struct seq_file *sf, void *v)
 {
         struct kernfs_open_file *of = sf->private;
-        struct kernfs_open_node *on = kernfs_deref_open_node(of, of->kn);
-        if (!on)
-                return -EINVAL;
-        of->event = atomic_read(&on->event);
+        of->event = atomic_read(&of_on(of)->event);
         return of->kn->attr.ops->seq_show(sf, v);
 }
@@ -235,7 +219,6 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
         struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
         ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
         const struct kernfs_ops *ops;
-        struct kernfs_open_node *on;
         char *buf;
         buf = of->prealloc_buf;
@@ -257,14 +240,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                 goto out_free;
         }
-        on = kernfs_deref_open_node(of, of->kn);
-        if (!on) {
-                len = -EINVAL;
-                mutex_unlock(&of->mutex);
-                goto out_free;
-        }
-        of->event = atomic_read(&on->event);
+        of->event = atomic_read(&of_on(of)->event);
         ops = kernfs_ops(of->kn);
         if (ops->read)
@@ -553,6 +529,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
         rc = 0;
         of->mmapped = true;
+        of_on(of)->nr_mmapped++;
         of->vm_ops = vma->vm_ops;
         vma->vm_ops = &kernfs_vm_ops;
 out_put:
@@ -580,31 +557,30 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
 static int kernfs_get_open_node(struct kernfs_node *kn,
                                 struct kernfs_open_file *of)
 {
-        struct kernfs_open_node *on, *new_on = NULL;
-        struct mutex *mutex = NULL;
+        struct kernfs_open_node *on;
+        struct mutex *mutex;
         mutex = kernfs_open_file_mutex_lock(kn);
-        on = kernfs_deref_open_node_protected(kn);
+        on = kernfs_deref_open_node_locked(kn);
-        if (on) {
-                list_add_tail(&of->list, &on->files);
-                mutex_unlock(mutex);
-                return 0;
-        } else {
+        if (!on) {
                 /* not there, initialize a new one */
-                new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
-                if (!new_on) {
+                on = kzalloc(sizeof(*on), GFP_KERNEL);
+                if (!on) {
                         mutex_unlock(mutex);
                         return -ENOMEM;
                 }
-                atomic_set(&new_on->event, 1);
-                init_waitqueue_head(&new_on->poll);
-                INIT_LIST_HEAD(&new_on->files);
-                list_add_tail(&of->list, &new_on->files);
-                rcu_assign_pointer(kn->attr.open, new_on);
+                atomic_set(&on->event, 1);
+                init_waitqueue_head(&on->poll);
+                INIT_LIST_HEAD(&on->files);
+                rcu_assign_pointer(kn->attr.open, on);
         }
-        mutex_unlock(mutex);
+        list_add_tail(&of->list, &on->files);
+        if (kn->flags & KERNFS_HAS_RELEASE)
+                on->nr_to_release++;
+        mutex_unlock(mutex);
         return 0;
 }
@@ -613,6 +589,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
  *
  * @kn: target kernfs_node
  * @of: associated kernfs_open_file
+ * @open_failed: ->open() failed, cancel ->release()
  *
  * Unlink @of from list of @kn's associated open files. If list of
  * associated open files becomes empty, disassociate and free
@@ -622,21 +599,30 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
  * None.
  */
 static void kernfs_unlink_open_file(struct kernfs_node *kn,
-                                    struct kernfs_open_file *of)
+                                    struct kernfs_open_file *of,
+                                    bool open_failed)
 {
         struct kernfs_open_node *on;
-        struct mutex *mutex = NULL;
+        struct mutex *mutex;
         mutex = kernfs_open_file_mutex_lock(kn);
-        on = kernfs_deref_open_node_protected(kn);
+        on = kernfs_deref_open_node_locked(kn);
         if (!on) {
                 mutex_unlock(mutex);
                 return;
         }
-        if (of)
+        if (of) {
+                if (kn->flags & KERNFS_HAS_RELEASE) {
+                        WARN_ON_ONCE(of->released == open_failed);
+                        if (open_failed)
+                                on->nr_to_release--;
+                }
+                if (of->mmapped)
+                        on->nr_mmapped--;
                 list_del(&of->list);
+        }
         if (list_empty(&on->files)) {
                 rcu_assign_pointer(kn->attr.open, NULL);
@@ -763,7 +749,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
         return 0;
 err_put_node:
-        kernfs_unlink_open_file(kn, of);
+        kernfs_unlink_open_file(kn, of, true);
 err_seq_release:
         seq_release(inode, file);
 err_free:
@@ -795,6 +781,7 @@ static void kernfs_release_file(struct kernfs_node *kn,
                  */
                 kn->attr.ops->release(of);
                 of->released = true;
+                of_on(of)->nr_to_release--;
         }
 }
@@ -802,15 +789,16 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 {
         struct kernfs_node *kn = inode->i_private;
         struct kernfs_open_file *of = kernfs_of(filp);
-        struct mutex *mutex = NULL;
         if (kn->flags & KERNFS_HAS_RELEASE) {
+                struct mutex *mutex;
                 mutex = kernfs_open_file_mutex_lock(kn);
                 kernfs_release_file(kn, of);
                 mutex_unlock(mutex);
         }
-        kernfs_unlink_open_file(kn, of);
+        kernfs_unlink_open_file(kn, of, false);
         seq_release(inode, filp);
         kfree(of->prealloc_buf);
         kfree(of);
@@ -818,28 +806,33 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
         return 0;
 }
-void kernfs_drain_open_files(struct kernfs_node *kn)
+bool kernfs_should_drain_open_files(struct kernfs_node *kn)
 {
         struct kernfs_open_node *on;
-        struct kernfs_open_file *of;
-        struct mutex *mutex = NULL;
-        if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
-                return;
+        bool ret;
         /*
-         * lockless opportunistic check is safe below because no one is adding to
-         * ->attr.open at this point of time. This check allows early bail out
-         * if ->attr.open is already NULL. kernfs_unlink_open_file makes
-         * ->attr.open NULL only while holding kernfs_open_file_mutex so below
-         * check under kernfs_open_file_mutex_ptr(kn) will ensure bailing out if
-         * ->attr.open became NULL while waiting for the mutex.
+         * @kn being deactivated guarantees that @kn->attr.open can't change
+         * beneath us, making the lockless test below safe.
          */
-        if (!rcu_access_pointer(kn->attr.open))
-                return;
+        WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+        rcu_read_lock();
+        on = rcu_dereference(kn->attr.open);
+        ret = on && (on->nr_mmapped || on->nr_to_release);
+        rcu_read_unlock();
+        return ret;
+}
+void kernfs_drain_open_files(struct kernfs_node *kn)
+{
+        struct kernfs_open_node *on;
+        struct kernfs_open_file *of;
+        struct mutex *mutex;
         mutex = kernfs_open_file_mutex_lock(kn);
-        on = kernfs_deref_open_node_protected(kn);
+        on = kernfs_deref_open_node_locked(kn);
         if (!on) {
                 mutex_unlock(mutex);
                 return;
@@ -848,13 +841,17 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
         list_for_each_entry(of, &on->files, list) {
                 struct inode *inode = file_inode(of->file);
-                if (kn->flags & KERNFS_HAS_MMAP)
+                if (of->mmapped) {
                         unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+                        of->mmapped = false;
+                        on->nr_mmapped--;
+                }
                 if (kn->flags & KERNFS_HAS_RELEASE)
                         kernfs_release_file(kn, of);
         }
+        WARN_ON_ONCE(on->nr_mmapped || on->nr_to_release);
         mutex_unlock(mutex);
 }
@@ -874,11 +871,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
  */
 __poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
 {
-        struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
-        struct kernfs_open_node *on = kernfs_deref_open_node(of, kn);
-        if (!on)
-                return EPOLLERR;
+        struct kernfs_open_node *on = of_on(of);
         poll_wait(of->file, &on->poll, wait);
...
@@ -157,6 +157,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
  */
 extern const struct file_operations kernfs_file_fops;
+bool kernfs_should_drain_open_files(struct kernfs_node *kn);
 void kernfs_drain_open_files(struct kernfs_node *kn);
 /*
...
@@ -114,6 +114,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
 void cgroup_file_notify(struct cgroup_file *cfile);
+void cgroup_file_show(struct cgroup_file *cfile, bool show);
 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
...
@@ -59,8 +59,6 @@ void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
                               resource_size_t size);
 void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
                               resource_size_t size);
-void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
-                              resource_size_t size);
 void devm_iounmap(struct device *dev, void __iomem *addr);
 int check_signature(const volatile void __iomem *io_addr,
                     const unsigned char *signature, int length);
...
@@ -108,10 +108,12 @@ enum kernfs_node_flag {
         KERNFS_HAS_SEQ_SHOW     = 0x0040,
         KERNFS_HAS_MMAP         = 0x0080,
         KERNFS_LOCKDEP          = 0x0100,
+        KERNFS_HIDDEN           = 0x0200,
         KERNFS_SUICIDAL         = 0x0400,
         KERNFS_SUICIDED         = 0x0800,
         KERNFS_EMPTY_DIR        = 0x1000,
         KERNFS_HAS_RELEASE      = 0x2000,
+        KERNFS_REMOVING         = 0x4000,
 };
 /* @flags for kernfs_create_root() */
@@ -429,6 +431,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
                                        const char *name,
                                        struct kernfs_node *target);
 void kernfs_activate(struct kernfs_node *kn);
+void kernfs_show(struct kernfs_node *kn, bool show);
 void kernfs_remove(struct kernfs_node *kn);
 void kernfs_break_active_protection(struct kernfs_node *kn);
 void kernfs_unbreak_active_protection(struct kernfs_node *kn);
...
@@ -4371,6 +4371,26 @@ void cgroup_file_notify(struct cgroup_file *cfile)
         spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
 }
+/**
+ * cgroup_file_show - show or hide a hidden cgroup file
+ * @cfile: target cgroup_file obtained by setting cftype->file_offset
+ * @show: whether to show or hide
+ */
+void cgroup_file_show(struct cgroup_file *cfile, bool show)
+{
+        struct kernfs_node *kn;
+        spin_lock_irq(&cgroup_file_kn_lock);
+        kn = cfile->kn;
+        kernfs_get(kn);
+        spin_unlock_irq(&cgroup_file_kn_lock);
+        if (kn)
+                kernfs_show(kn, show);
+        kernfs_put(kn);
+}
 /**
  * css_next_child - find the next child of a given css
  * @pos: the current position (%NULL to initiate traversal)
...
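cgroup_file_show() above wraps kernfs_show() for interface files created with cftype->file_offset, so a controller can flip a file's visibility after it has been created. A hedged sketch of a caller; the state structure and the enable condition are hypothetical:

#include <linux/cgroup.h>

/* Hypothetical per-cgroup state embedding a struct cgroup_file. The cftype
 * that creates the file would set .file_offset to the offset of extra_file
 * so the core fills in its kernfs handle. */
struct example_state {
        struct cgroup_file extra_file;
        bool extra_enabled;
};

static void example_update_file_visibility(struct example_state *st)
{
        /* Hide the file while the feature is off; hiding drains current
         * users of the underlying kernfs node. */
        cgroup_file_show(&st->extra_file, st->extra_enabled);
}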
@@ -103,21 +103,6 @@ void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
 }
 EXPORT_SYMBOL(devm_ioremap_wc);
-/**
- * devm_ioremap_np - Managed ioremap_np()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed ioremap_np(). Map is automatically unmapped on driver detach.
- */
-void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
-                              resource_size_t size)
-{
-        return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP);
-}
-EXPORT_SYMBOL(devm_ioremap_np);
 /**
  * devm_iounmap - Managed iounmap()
  * @dev: Generic device to unmap for
...