Commit 23e9dde0 authored by Peng Zhang, committed by Andrew Morton

maple_tree: optimize mas_wr_append(), also improve duplicating VMAs

When the new range is completely contained within the original last range,
without touching either of its boundaries, two new entries can be appended
to the end of the node as a fast path. The original last pivot is updated
only at the end, and the two newly appended entries cannot be reached by
readers before that update, so the fast path is also safe in RCU mode.
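
To visualize the transformation, here is a minimal userspace model of a
leaf node's pivot/slot arrays (array sizes, names, and values are
illustrative, not the kernel's): an interior write into the last range
leaves both of its boundaries in place and appends two entries.

#include <stdio.h>

/*
 * Toy model of a maple leaf: slot i covers (pivots[i-1], pivots[i]].
 * Illustrative only; the real layout lives in lib/maple_tree.c.
 */
int main(void)
{
	unsigned long pivots[16] = { 9, 19, 49 };	/* last range: [20, 49] */
	const char *slots[16] = { "A", "B", "C" };
	int end = 2;					/* offset of the last entry */

	/* Write "NEW" over [30, 39], strictly inside [20, 49]. */
	unsigned long index = 30, last = 39;

	slots[end + 2] = slots[end];	/* tail of the old content: [40, 49] */
	pivots[end + 2] = pivots[end];
	pivots[end + 1] = last;		/* the new entry: [30, 39] */
	slots[end + 1] = "NEW";
	pivots[end] = index - 1;	/* shrink the old range to [20, 29] last */

	for (int i = 0; i <= end + 2; i++)
		printf("slot %d: %s up to %lu\n", i, slots[i], pivots[i]);
	return 0;
}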

This is useful for sequential insertion, which is what we do in
dup_mmap(). Enabling BENCH_FORK in test_maple_tree and running just
bench_forking() gives the following runtimes:

before:               after:
17,874.83 msec        15,738.38 msec

It shows about a 12% performance improvement for duplicating VMAs.
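
For context, the pattern that benefits is back-to-back stores of adjacent,
ascending ranges. Below is a hedged sketch of such a loop against the
in-kernel maple tree API; sequential_fill() and its parameters are
illustrative, not from the patch.

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/gfp.h>

static DEFINE_MTREE(mt);

/*
 * Store adjacent ranges in ascending order, as dup_mmap() effectively
 * does when copying VMAs; each store lands at the end of the last leaf
 * and can take the mas_wr_append() fast path.
 */
static int sequential_fill(unsigned long nr, unsigned long span)
{
	MA_STATE(mas, &mt, 0, 0);
	unsigned long i;
	int ret = 0;

	mtree_lock(&mt);
	for (i = 0; i < nr; i++) {
		mas_set_range(&mas, i * span, (i + 1) * span - 1);
		ret = mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
		if (ret)
			break;
	}
	mtree_unlock(&mt);
	return ret;
}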

Link: https://lkml.kernel.org/r/20230628073657.75314-4-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c38d9ff2
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4267,10 +4267,10 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
  *
  * Return: True if appended, false otherwise
  */
-static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
+static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+		unsigned char new_end)
 {
 	unsigned char end = wr_mas->node_end;
-	unsigned char new_end = end + 1;
 	struct ma_state *mas = wr_mas->mas;
 	unsigned char node_pivots = mt_pivots[wr_mas->type];
 
@@ -4282,17 +4282,28 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
 		ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
 	}
 
-	if (mas->last == wr_mas->r_max) {
-		/* Append to end of range */
-		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
-		wr_mas->pivots[end] = mas->index - 1;
-		mas->offset = new_end;
+	if (new_end == wr_mas->node_end + 1) {
+		if (mas->last == wr_mas->r_max) {
+			/* Append to end of range */
+			rcu_assign_pointer(wr_mas->slots[new_end],
+					   wr_mas->entry);
+			wr_mas->pivots[end] = mas->index - 1;
+			mas->offset = new_end;
+		} else {
+			/* Append to start of range */
+			rcu_assign_pointer(wr_mas->slots[new_end],
+					   wr_mas->content);
+			wr_mas->pivots[end] = mas->last;
+			rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
+		}
 	} else {
-		/* Append to start of range */
-		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
-		wr_mas->pivots[end] = mas->last;
-		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
+		/* Append to the range without touching any boundaries. */
+		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
+		wr_mas->pivots[end + 1] = mas->last;
+		rcu_assign_pointer(wr_mas->slots[end + 1], wr_mas->entry);
+		wr_mas->pivots[end] = mas->index - 1;
+		mas->offset = end + 1;
 	}
 
 	if (!wr_mas->content || !wr_mas->entry)
 		mas_update_gap(mas);
@@ -4338,7 +4349,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
 		goto slow_path;
 
 	/* Attempt to append */
-	if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
+	if (mas_wr_append(wr_mas, new_end))
 		return;
 
 	if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
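
To make the RCU ordering argument concrete, the middle-append branch is
reproduced below with explanatory annotations; the comments are mine, not
from the kernel source.

/*
 * Before: slots[end] = content covers [r_min, r_max], and readers never
 * look past offset `end` because pivots[end] bounds the walk.
 */
rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);	/* tail: [last + 1, r_max] */
wr_mas->pivots[end + 1] = mas->last;
rcu_assign_pointer(wr_mas->slots[end + 1], wr_mas->entry);	/* new entry: [index, last] */
/*
 * Only now can readers reach the two appended slots: shrinking
 * pivots[end] to index - 1 is what publishes them.
 */
wr_mas->pivots[end] = mas->index - 1;
mas->offset = end + 1;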