Commit 8c1ce9d6 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rcu: avoid passing an argument to the callback function

From: Dipankar Sarma <dipankar@in.ibm.com>

This patch changes the call_rcu() API and avoids passing an argument to the
callback function as suggested by Rusty.  Instead, it is assumed that the
user has embedded the rcu head into a structure that is useful in the
callback and the rcu_head pointer is passed to the callback.  The callback
can use container_of() to get the pointer to its structure and work with
it.  Together with the rcu-singly-link patch, it reduces the rcu_head size
by 50%.  Considering that we embed these in structures such as struct dentry
and struct dst_entry, this is a worthwhile saving in space.
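
To see where the 50% comes from, here is a rough before/after sketch of the
structure layout.  The "before" form is recalled from the pre-patch rcupdate.h
rather than taken from this diff, so treat it as illustrative only; the "after"
form matches the definition changed below.

/* Before the rcu-singly-link patch and this one: four pointer-sized
 * fields (a doubly-linked list_head, the callback, and its argument). */
struct rcu_head {
        struct list_head list;
        void (*func)(void *obj);
        void *arg;
};

/* After both patches: two pointer-sized fields, i.e. half the size. */
struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *head);
};

On a 64-bit machine that is 32 bytes down to 16 bytes per embedded rcu_head.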

An example:

struct my_struct {
	struct rcu_head rcu;
	int x;
	int y;
};

void my_rcu_callback(struct rcu_head *head)
{
	struct my_struct *p = container_of(head, struct my_struct, rcu);
	kfree(p);
}

void my_delete(struct my_struct *p)
{
	...
	call_rcu(&p->rcu, my_rcu_callback);
	...
}
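
For contrast, the same teardown written against the old interface (the form
every call site in the diff below is migrated away from) would have looked
roughly like this; my_old_rcu_callback and my_old_delete are illustrative
names, not taken from the patch:

void my_old_rcu_callback(void *arg)
{
	/* The old call_rcu() stashed an opaque argument in rcu_head.arg
	 * and handed it back to the callback. */
	struct my_struct *p = arg;
	kfree(p);
}

void my_old_delete(struct my_struct *p)
{
	...
	call_rcu(&p->rcu, my_old_rcu_callback, p);
	...
}

The diff below is essentially that transformation applied to every call_rcu()
user in the tree, plus a few small wrapper callbacks (dst_rcu_free,
ipc_immediate_free, destroy_nbp_rcu) for places where the old code either cast
a function such as dst_free() or kfree() to the callback type, which no longer
matches the new prototype, or where keeping a typed helper (destroy_nbp) was
cleaner than folding container_of() into it.
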
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b659a6fb
@@ -158,9 +158,10 @@ void pte_free_now(struct page *ptepage)
 	pte_free(ptepage);
 }
 
-static void pte_free_rcu_callback(void *arg)
+static void pte_free_rcu_callback(struct rcu_head *head)
 {
-	struct pte_freelist_batch *batch = arg;
+	struct pte_freelist_batch *batch =
+		container_of(head, struct pte_freelist_batch, rcu);
 	unsigned int i;
 
 	for (i = 0; i < batch->index; i++)
@@ -171,7 +172,7 @@ static void pte_free_rcu_callback(void *arg)
 void pte_free_submit(struct pte_freelist_batch *batch)
 {
 	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback, batch);
+	call_rcu(&batch->rcu, pte_free_rcu_callback);
 }
 
 void pte_free_finish(void)
......
@@ -65,9 +65,9 @@ struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
 };
 
-static void d_callback(void *arg)
+static void d_callback(struct rcu_head *head)
 {
-	struct dentry * dentry = (struct dentry *)arg;
+	struct dentry * dentry = container_of(head, struct dentry, d_rcu);
 
 	if (dname_external(dentry))
 		kfree(dentry->d_name.name);
@@ -82,7 +82,7 @@ static void d_free(struct dentry *dentry)
 {
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
-	call_rcu(&dentry->d_rcu, d_callback, dentry);
+	call_rcu(&dentry->d_rcu, d_callback);
 }
 
 /*
......
@@ -46,18 +46,16 @@
 /**
  * struct rcu_head - callback structure for use with RCU
  * @next: next update requests in a list
  * @func: actual update function to call after the grace period.
- * @arg: argument to be passed to the actual update function.
  */
 struct rcu_head {
 	struct rcu_head *next;
-	void (*func)(void *obj);
-	void *arg;
+	void (*func)(struct rcu_head *head);
 };
 
-#define RCU_HEAD_INIT(head) { .next = NULL, .func = NULL, .arg = NULL }
+#define RCU_HEAD_INIT(head) { .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT(head)
 #define INIT_RCU_HEAD(ptr) do { \
-	(ptr)->next = NULL; (ptr)->func = NULL; (ptr)->arg = NULL; \
+	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
@@ -144,7 +142,7 @@ extern void rcu_restart_cpu(int cpu);
 /* Exported interfaces */
 extern void FASTCALL(call_rcu(struct rcu_head *head,
-		void (*func)(void *arg), void *arg));
+		void (*func)(struct rcu_head *head)));
 extern void synchronize_kernel(void);
 
 #endif /* __KERNEL__ */
......
@@ -183,6 +183,12 @@ static inline void dst_free(struct dst_entry * dst)
 	__dst_free(dst);
 }
 
+static inline void dst_rcu_free(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+	dst_free(dst);
+}
+
 static inline void dst_confirm(struct dst_entry *dst)
 {
 	if (dst)
......
@@ -333,25 +333,40 @@ void* ipc_rcu_alloc(int size)
 /*
  * Since RCU callback function is called in bh,
  * we need to defer the vfree to schedule_work
  */
-static void ipc_schedule_free(void* arg)
+static void ipc_schedule_free(struct rcu_head *head)
 {
-	struct ipc_rcu_vmalloc *free = arg;
+	struct ipc_rcu_vmalloc *free =
+		container_of(head, struct ipc_rcu_vmalloc, rcu);
 
 	INIT_WORK(&free->work, vfree, free);
 	schedule_work(&free->work);
 }
 
+/**
+ * ipc_immediate_free - free ipc + rcu space
+ *
+ * Free from the RCU callback context
+ *
+ */
+static void ipc_immediate_free(struct rcu_head *head)
+{
+	struct ipc_rcu_kmalloc *free =
+		container_of(head, struct ipc_rcu_kmalloc, rcu);
+	kfree(free);
+}
+
 void ipc_rcu_free(void* ptr, int size)
 {
 	if (rcu_use_vmalloc(size)) {
 		struct ipc_rcu_vmalloc *free;
 		free = ptr - sizeof(*free);
-		call_rcu(&free->rcu, ipc_schedule_free, free);
+		call_rcu(&free->rcu, ipc_schedule_free);
 	} else {
 		struct ipc_rcu_kmalloc *free;
 		free = ptr - sizeof(*free);
-		/* kfree takes a "const void *" so gcc warns.  So we cast. */
-		call_rcu(&free->rcu, (void (*)(void *))kfree, free);
+		call_rcu(&free->rcu, ipc_immediate_free);
 	}
 }
......
@@ -177,9 +177,10 @@ static inline int audit_add_rule(struct audit_entry *entry,
 	return 0;
 }
 
-static void audit_free_rule(void *arg)
+static void audit_free_rule(struct rcu_head *head)
 {
-	kfree(arg);
+	struct audit_entry *e = container_of(head, struct audit_entry, rcu);
+	kfree(e);
 }
 
 /* Note that audit_add_rule and audit_del_rule are called via
@@ -195,7 +196,7 @@ static inline int audit_del_rule(struct audit_rule *rule,
 	list_for_each_entry(e, list, list) {
 		if (!audit_compare_rule(rule, &e->rule)) {
 			list_del_rcu(&e->list);
-			call_rcu(&e->rcu, audit_free_rule, e);
+			call_rcu(&e->rcu, audit_free_rule);
 			return 0;
 		}
 	}
......
@@ -68,20 +68,19 @@ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
 /**
  * call_rcu - Queue an RCU update request.
  * @head: structure to be used for queueing the RCU updates.
  * @func: actual update function to be invoked after the grace period
- * @arg: argument to be passed to the update function
  *
  * The update function will be invoked as soon as all CPUs have performed
  * a context switch or been seen in the idle loop or in a user process.
  * The read-side of critical section that use call_rcu() for updation must
  * be protected by rcu_read_lock()/rcu_read_unlock().
  */
-void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+void fastcall call_rcu(struct rcu_head *head,
+				void (*func)(struct rcu_head *rcu))
 {
 	int cpu;
 	unsigned long flags;
 
 	head->func = func;
-	head->arg = arg;
 	head->next = NULL;
 	local_irq_save(flags);
 	cpu = smp_processor_id();
@@ -100,7 +99,7 @@ static void rcu_do_batch(struct rcu_head *list)
 	while (list) {
 		next = list->next;
-		list->func(list->arg);
+		list->func(list);
 		list = next;
 	}
 }
@@ -358,11 +357,18 @@ void __init rcu_init(void)
 	register_cpu_notifier(&rcu_nb);
 }
 
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+
 /* Because of FASTCALL declaration of complete, we use this wrapper */
-static void wakeme_after_rcu(void *completion)
+static void wakeme_after_rcu(struct rcu_head *head)
 {
-	complete(completion);
+	struct rcu_synchronize *rcu;
+
+	rcu = container_of(head, struct rcu_synchronize, head);
+	complete(&rcu->completion);
 }
 
 /**
@@ -371,14 +377,14 @@ static void wakeme_after_rcu(void *completion)
  */
 void synchronize_kernel(void)
 {
-	struct rcu_head rcu;
-	DECLARE_COMPLETION(completion);
+	struct rcu_synchronize rcu;
 
+	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished */
-	call_rcu(&rcu, wakeme_after_rcu, &completion);
+	call_rcu(&rcu.head, wakeme_after_rcu);
 
 	/* Wait for it */
-	wait_for_completion(&completion);
+	wait_for_completion(&rcu.completion);
 }
......
@@ -75,9 +75,8 @@ static int br_initial_port_cost(struct net_device *dev)
 	return 100;	/* assume old 10Mbps */
 }
 
-static void destroy_nbp(void *arg)
+static void destroy_nbp(struct net_bridge_port *p)
 {
-	struct net_bridge_port *p = arg;
 	struct net_device *dev = p->dev;
 
 	dev->br_port = NULL;
@@ -88,6 +87,13 @@ static void destroy_nbp(void *arg)
 	br_sysfs_freeif(p);
 }
 
+static void destroy_nbp_rcu(struct rcu_head *head)
+{
+	struct net_bridge_port *p =
+		container_of(head, struct net_bridge_port, rcu);
+	destroy_nbp(p);
+}
+
 /* called with RTNL */
 static void del_nbp(struct net_bridge_port *p)
 {
@@ -108,7 +114,7 @@ static void del_nbp(struct net_bridge_port *p)
 	del_timer_sync(&p->forward_delay_timer);
 	del_timer_sync(&p->hold_timer);
 
-	call_rcu(&p->rcu, destroy_nbp, p);
+	call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
 /* called with RTNL */
......
@@ -146,14 +146,14 @@ static __inline__ unsigned dn_hash(unsigned short src, unsigned short dst)
 static inline void dnrt_free(struct dn_route *rt)
 {
-	call_rcu(&rt->u.dst.rcu_head, (void (*)(void *))dst_free, &rt->u.dst);
+	call_rcu(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
 static inline void dnrt_drop(struct dn_route *rt)
 {
 	if (rt)
 		dst_release(&rt->u.dst);
-	call_rcu(&rt->u.dst.rcu_head, (void (*)(void *))dst_free, &rt->u.dst);
+	call_rcu(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
 static void dn_dst_check_expire(unsigned long dummy)
......
@@ -439,13 +439,13 @@ static struct file_operations rt_cpu_seq_fops = {
 static __inline__ void rt_free(struct rtable *rt)
 {
-	call_rcu(&rt->u.dst.rcu_head, (void (*)(void *))dst_free, &rt->u.dst);
+	call_rcu(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
 static __inline__ void rt_drop(struct rtable *rt)
 {
 	ip_rt_put(rt);
-	call_rcu(&rt->u.dst.rcu_head, (void (*)(void *))dst_free, &rt->u.dst);
+	call_rcu(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
 static __inline__ int rt_fast_clean(struct rtable *rth)
......
@@ -411,9 +411,9 @@ void qdisc_reset(struct Qdisc *qdisc)
 /* this is the rcu callback function to clean up a qdisc when there
  * are no further references to it */
 
-static void __qdisc_destroy (void * arg)
+static void __qdisc_destroy(struct rcu_head *head)
 {
-	struct Qdisc *qdisc = (struct Qdisc *) arg;
+	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
 	struct Qdisc_ops *ops = qdisc->ops;
 
 #ifdef CONFIG_NET_ESTIMATOR
@@ -448,7 +448,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 		}
 	}
 
-	call_rcu(&qdisc->q_rcu, __qdisc_destroy, qdisc);
+	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }
......
@@ -134,9 +134,9 @@ struct sel_netif *sel_netif_lookup(struct net_device *dev)
 	return netif;
 }
 
-static void sel_netif_free(void *p)
+static void sel_netif_free(struct rcu_head *p)
 {
-	struct sel_netif *netif = p;
+	struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);
 
 	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
 	kfree(netif);
@@ -151,7 +151,7 @@ static void sel_netif_destroy(struct sel_netif *netif)
 	sel_netif_total--;
 	spin_unlock_bh(&sel_netif_lock);
 
-	call_rcu(&netif->rcu_head, sel_netif_free, netif);
+	call_rcu(&netif->rcu_head, sel_netif_free);
 }
 
 void sel_netif_put(struct sel_netif *netif)
......