Commit 94a0b58d authored by Ira Weiny's avatar Ira Weiny Committed by David Sterba

btrfs: raid56: convert kmaps to kmap_local_page

These kmaps are thread local and don't need to be atomic.  So they can use
the more efficient kmap_local_page().  However, the mapping of pages in
the stripes and the additional parity and qstripe pages are a bit
trickier because the unmapping must occur in the opposite order from the
mapping.  Furthermore, the pointer array in __raid_recover_end_io() may
get reordered.

Convert these calls to kmap_local_page() taking care to reverse the
unmappings of any page arrays as well as being careful with the mappings
of any special pages such as the parity and qstripe pages.
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 58c1a35c
...@@ -1232,13 +1232,13 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1232,13 +1232,13 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
/* first collect one page from each data stripe */ /* first collect one page from each data stripe */
for (stripe = 0; stripe < nr_data; stripe++) { for (stripe = 0; stripe < nr_data; stripe++) {
p = page_in_rbio(rbio, stripe, pagenr, 0); p = page_in_rbio(rbio, stripe, pagenr, 0);
pointers[stripe] = kmap(p); pointers[stripe] = kmap_local_page(p);
} }
/* then add the parity stripe */ /* then add the parity stripe */
p = rbio_pstripe_page(rbio, pagenr); p = rbio_pstripe_page(rbio, pagenr);
SetPageUptodate(p); SetPageUptodate(p);
pointers[stripe++] = kmap(p); pointers[stripe++] = kmap_local_page(p);
if (has_qstripe) { if (has_qstripe) {
...@@ -1248,7 +1248,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1248,7 +1248,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
*/ */
p = rbio_qstripe_page(rbio, pagenr); p = rbio_qstripe_page(rbio, pagenr);
SetPageUptodate(p); SetPageUptodate(p);
pointers[stripe++] = kmap(p); pointers[stripe++] = kmap_local_page(p);
raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
pointers); pointers);
...@@ -1257,10 +1257,8 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1257,10 +1257,8 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
copy_page(pointers[nr_data], pointers[0]); copy_page(pointers[nr_data], pointers[0]);
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
} }
for (stripe = stripe - 1; stripe >= 0; stripe--)
kunmap_local(pointers[stripe]);
for (stripe = 0; stripe < rbio->real_stripes; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
} }
/* /*
...@@ -1777,6 +1775,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1777,6 +1775,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{ {
int pagenr, stripe; int pagenr, stripe;
void **pointers; void **pointers;
void **unmap_array;
int faila = -1, failb = -1; int faila = -1, failb = -1;
struct page *page; struct page *page;
blk_status_t err; blk_status_t err;
...@@ -1788,6 +1787,16 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1788,6 +1787,16 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
goto cleanup_io; goto cleanup_io;
} }
/*
* Store copy of pointers that does not get reordered during
* reconstruction so that kunmap_local works.
*/
unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
if (!unmap_array) {
err = BLK_STS_RESOURCE;
goto cleanup_pointers;
}
faila = rbio->faila; faila = rbio->faila;
failb = rbio->failb; failb = rbio->failb;
...@@ -1809,8 +1818,11 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1809,8 +1818,11 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
!test_bit(pagenr, rbio->dbitmap)) !test_bit(pagenr, rbio->dbitmap))
continue; continue;
/* setup our array of pointers with pages /*
* from each stripe * Setup our array of pointers with pages from each stripe
*
* NOTE: store a duplicate array of pointers to preserve the
* pointer order
*/ */
for (stripe = 0; stripe < rbio->real_stripes; stripe++) { for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
/* /*
...@@ -1824,7 +1836,8 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1824,7 +1836,8 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
} else { } else {
page = rbio_stripe_page(rbio, stripe, pagenr); page = rbio_stripe_page(rbio, stripe, pagenr);
} }
pointers[stripe] = kmap(page); pointers[stripe] = kmap_local_page(page);
unmap_array[stripe] = pointers[stripe];
} }
/* all raid6 handling here */ /* all raid6 handling here */
...@@ -1917,24 +1930,14 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1917,24 +1930,14 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
} }
} }
} }
for (stripe = 0; stripe < rbio->real_stripes; stripe++) { for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--)
/* kunmap_local(unmap_array[stripe]);
* if we're rebuilding a read, we have to use
* pages from the bio list
*/
if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
(stripe == faila || stripe == failb)) {
page = page_in_rbio(rbio, stripe, pagenr, 0);
} else {
page = rbio_stripe_page(rbio, stripe, pagenr);
}
kunmap(page);
}
} }
err = BLK_STS_OK; err = BLK_STS_OK;
cleanup: cleanup:
kfree(unmap_array);
cleanup_pointers:
kfree(pointers); kfree(pointers);
cleanup_io: cleanup_io:
...@@ -2359,13 +2362,13 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, ...@@ -2359,13 +2362,13 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
goto cleanup; goto cleanup;
} }
SetPageUptodate(q_page); SetPageUptodate(q_page);
pointers[rbio->real_stripes - 1] = kmap(q_page); pointers[rbio->real_stripes - 1] = kmap_local_page(q_page);
} }
atomic_set(&rbio->error, 0); atomic_set(&rbio->error, 0);
/* Map the parity stripe just once */ /* Map the parity stripe just once */
pointers[nr_data] = kmap(p_page); pointers[nr_data] = kmap_local_page(p_page);
for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
struct page *p; struct page *p;
...@@ -2373,7 +2376,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, ...@@ -2373,7 +2376,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
/* first collect one page from each data stripe */ /* first collect one page from each data stripe */
for (stripe = 0; stripe < nr_data; stripe++) { for (stripe = 0; stripe < nr_data; stripe++) {
p = page_in_rbio(rbio, stripe, pagenr, 0); p = page_in_rbio(rbio, stripe, pagenr, 0);
pointers[stripe] = kmap(p); pointers[stripe] = kmap_local_page(p);
} }
if (has_qstripe) { if (has_qstripe) {
...@@ -2396,14 +2399,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, ...@@ -2396,14 +2399,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
bitmap_clear(rbio->dbitmap, pagenr, 1); bitmap_clear(rbio->dbitmap, pagenr, 1);
kunmap_local(parity); kunmap_local(parity);
for (stripe = 0; stripe < nr_data; stripe++) for (stripe = nr_data - 1; stripe >= 0; stripe--)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); kunmap_local(pointers[stripe]);
} }
kunmap(p_page); kunmap_local(pointers[nr_data]);
__free_page(p_page); __free_page(p_page);
if (q_page) { if (q_page) {
kunmap(q_page); kunmap_local(pointers[rbio->real_stripes - 1]);
__free_page(q_page); __free_page(q_page);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment