Commit 8c15a9e8 authored by Uladzislau Rezki (Sony), committed by Paul E. McKenney

rcu/kvfree: Move bulk/list reclaim to separate functions

The kvfree_rcu() code maintains lists of pages of pointers, but also a
singly linked list, with the latter being used when memory allocation
fails.  Traversal of these two types of lists is currently open coded.
This commit simplifies the code by providing kvfree_rcu_bulk() and
kvfree_rcu_list() functions, respectively, to traverse these two types
of lists.  This patch does not introduce any functional change.
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 27538e18
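As an aside for readers new to this code: the message above refers to two kinds of storage, pages of pointers (the "bulk" channels) and a fallback singly linked list threaded through rcu_head structures. The sketch below is only a rough illustration of that layout; the type and field names are invented stand-ins, not the kernel's actual kvfree_rcu_bulk_data and kfree_rcu_cpu_work definitions, which carry more state.

/* Rough illustration only; all names here are invented for this sketch. */
#define SKETCH_N_CHANNELS	2	/* channel 1: kmalloc()'d objects, channel 2: vmalloc()'d objects */
#define SKETCH_PTRS_PER_PAGE	500	/* roughly a page's worth of pointers */

struct sketch_rcu_head {
	struct sketch_rcu_head *next;	/* threads channel 3, the fallback list */
	void *ptr;			/* object to free after the grace period */
};

/* Stands in for kvfree_rcu_bulk_data: one page filled with pointers to free. */
struct sketch_bulk_page {
	unsigned long nr_records;	/* filled slots in records[] */
	void *records[SKETCH_PTRS_PER_PAGE];
};

/* Stands in for part of kfree_rcu_cpu_work: what a grace period hands to the worker. */
struct sketch_reclaim_batch {
	struct sketch_bulk_page *bulk_head[SKETCH_N_CHANNELS];	/* channels 1 and 2: bulk path */
	struct sketch_rcu_head *head;	/* channel 3: used when no page could be allocated */
};

With that picture in mind, the new kvfree_rcu_bulk() in the diff below handles one filled page from channel 1 or 2, and kvfree_rcu_list() walks channel 3.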
@@ -3029,52 +3029,28 @@ drain_page_cache(struct kfree_rcu_cpu *krcp)
 	return freed;
 }
 
-/*
- * This function is invoked in workqueue context after a grace period.
- * It frees all the objects queued on ->bulk_head_free or ->head_free.
- */
-static void kfree_rcu_work(struct work_struct *work)
-{
-	unsigned long flags;
-	struct kvfree_rcu_bulk_data *bnode, *n;
-	struct list_head bulk_head[FREE_N_CHANNELS];
-	struct rcu_head *head, *next;
-	struct kfree_rcu_cpu *krcp;
-	struct kfree_rcu_cpu_work *krwp;
-	int i, j;
-
-	krwp = container_of(to_rcu_work(work),
-			struct kfree_rcu_cpu_work, rcu_work);
-	krcp = krwp->krcp;
-
-	raw_spin_lock_irqsave(&krcp->lock, flags);
-	// Channels 1 and 2.
-	for (i = 0; i < FREE_N_CHANNELS; i++)
-		list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
-
-	// Channel 3.
-	head = krwp->head_free;
-	krwp->head_free = NULL;
-	raw_spin_unlock_irqrestore(&krcp->lock, flags);
-
-	// Handle the first two channels.
-	for (i = 0; i < FREE_N_CHANNELS; i++) {
-		list_for_each_entry_safe(bnode, n, &bulk_head[i], list) {
-			debug_rcu_bhead_unqueue(bnode);
-
-			rcu_lock_acquire(&rcu_callback_map);
-			if (i == 0) { // kmalloc() / kfree().
-				trace_rcu_invoke_kfree_bulk_callback(
-					rcu_state.name, bnode->nr_records,
-					bnode->records);
-
-				kfree_bulk(bnode->nr_records, bnode->records);
-			} else { // vmalloc() / vfree().
-				for (j = 0; j < bnode->nr_records; j++) {
-					trace_rcu_invoke_kvfree_callback(
-						rcu_state.name, bnode->records[j], 0);
-
-					vfree(bnode->records[j]);
-				}
-			}
-			rcu_lock_release(&rcu_callback_map);
+static void
+kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
+	struct kvfree_rcu_bulk_data *bnode, int idx)
+{
+	unsigned long flags;
+	int i;
+
+	debug_rcu_bhead_unqueue(bnode);
+
+	rcu_lock_acquire(&rcu_callback_map);
+	if (idx == 0) { // kmalloc() / kfree().
+		trace_rcu_invoke_kfree_bulk_callback(
+			rcu_state.name, bnode->nr_records,
+			bnode->records);
+
+		kfree_bulk(bnode->nr_records, bnode->records);
+	} else { // vmalloc() / vfree().
+		for (i = 0; i < bnode->nr_records; i++) {
+			trace_rcu_invoke_kvfree_callback(
+				rcu_state.name, bnode->records[i], 0);
+
+			vfree(bnode->records[i]);
+		}
+	}
+	rcu_lock_release(&rcu_callback_map);
@@ -3088,16 +3064,13 @@ static void kfree_rcu_work(struct work_struct *work)
-			free_page((unsigned long) bnode);
+		free_page((unsigned long) bnode);
 
-			cond_resched_tasks_rcu_qs();
-		}
-	}
+	cond_resched_tasks_rcu_qs();
+}
 
-	/*
-	 * This is used when the "bulk" path can not be used for the
-	 * double-argument of kvfree_rcu(). This happens when the
-	 * page-cache is empty, which means that objects are instead
-	 * queued on a linked list through their rcu_head structures.
-	 * This list is named "Channel 3".
-	 */
+static void
+kvfree_rcu_list(struct rcu_head *head)
+{
+	struct rcu_head *next;
+
 	for (; head; head = next) {
 		void *ptr = (void *) head->func;
 		unsigned long offset = (void *) head - ptr;
@@ -3115,6 +3088,49 @@ static void kfree_rcu_work(struct work_struct *work)
 	}
 }
 
+/*
+ * This function is invoked in workqueue context after a grace period.
+ * It frees all the objects queued on ->bulk_head_free or ->head_free.
+ */
+static void kfree_rcu_work(struct work_struct *work)
+{
+	unsigned long flags;
+	struct kvfree_rcu_bulk_data *bnode, *n;
+	struct list_head bulk_head[FREE_N_CHANNELS];
+	struct rcu_head *head;
+	struct kfree_rcu_cpu *krcp;
+	struct kfree_rcu_cpu_work *krwp;
+	int i;
+
+	krwp = container_of(to_rcu_work(work),
+			struct kfree_rcu_cpu_work, rcu_work);
+	krcp = krwp->krcp;
+
+	raw_spin_lock_irqsave(&krcp->lock, flags);
+	// Channels 1 and 2.
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
+
+	// Channel 3.
+	head = krwp->head_free;
+	krwp->head_free = NULL;
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+	// Handle the first two channels.
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
+			kvfree_rcu_bulk(krcp, bnode, i);
+
+	/*
+	 * This is used when the "bulk" path can not be used for the
+	 * double-argument of kvfree_rcu(). This happens when the
+	 * page-cache is empty, which means that objects are instead
+	 * queued on a linked list through their rcu_head structures.
+	 * This list is named "Channel 3".
+	 */
+	kvfree_rcu_list(head);
+}
+
 static bool
 need_offload_krc(struct kfree_rcu_cpu *krcp)
 {
...
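One detail of kvfree_rcu_list() above that may not be obvious is how it recovers the object from a bare rcu_head: as the loop reads it, head->func holds the object's base pointer, and the offset of the head within the object falls out of simple pointer arithmetic. The userspace sketch below mirrors only that recovery step; it is not kernel code, and the sample_* names are invented here.

/* Userspace illustration only; mirrors the pointer recovery in kvfree_rcu_list(). */
#include <stdio.h>
#include <stdlib.h>

struct sample_head {
	void *base;			/* stands in for rcu_head::func holding the object's base pointer */
};

struct sample_obj {
	long payload[4];
	struct sample_head rh;		/* embedded head, like the rcu_head inside a kvfree_rcu() user's object */
};

int main(void)
{
	struct sample_obj *obj = malloc(sizeof(*obj));
	struct sample_head *head;

	if (!obj)
		return 1;

	obj->rh.base = obj;		/* queue time: remember where the object starts */
	head = &obj->rh;

	/* Reclaim time: given only the head, find the object and its offset. */
	void *ptr = head->base;
	unsigned long offset = (unsigned long) ((char *) head - (char *) ptr);

	printf("head sits %lu bytes into the object\n", offset);
	free(ptr);			/* the kernel would call kvfree(ptr) here */
	return 0;
}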