Commit db2351eb authored by Mikulas Patocka, committed by Mike Snitzer

dm kcopyd: avoid useless atomic operations

The functions set_bit and clear_bit are atomic. We don't need
atomicity when manipulating the dm-kcopyd job flags, so change these
calls to direct (non-atomic) manipulation of the flags.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 6b06dd5a
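
For context (not part of the original commit): set_bit() and clear_bit() are atomic read-modify-write operations, which is unnecessary here because the job flags are built without concurrent writers (the commit's premise). The sketch below is a minimal userspace illustration of the two styles, not kernel code; BIT() and the flag indices are local stand-ins for the kernel definitions.

/* Minimal userspace sketch (not kernel code): contrast atomic bit updates
 * with plain bit masking on a privately owned flags word.  BIT() and the
 * flag indices below are local stand-ins for the kernel definitions. */
#include <stdatomic.h>
#include <stdio.h>

#define BIT(nr)                (1u << (nr))
#define DM_KCOPYD_IGNORE_ERROR 0    /* stand-in value */
#define DM_KCOPYD_WRITE_SEQ    1    /* stand-in value */

int main(void)
{
        /* "Before" style: every update is an atomic read-modify-write,
         * comparable to the kernel's set_bit()/clear_bit(). */
        atomic_uint aflags = 0;
        atomic_fetch_or(&aflags, BIT(DM_KCOPYD_WRITE_SEQ));       /* ~ set_bit()   */
        atomic_fetch_and(&aflags, ~BIT(DM_KCOPYD_IGNORE_ERROR));  /* ~ clear_bit() */

        /* "After" style: plain loads and stores, which is enough when no
         * other thread touches the flags concurrently. */
        unsigned flags = 0;
        flags |= BIT(DM_KCOPYD_WRITE_SEQ);       /* set   */
        flags &= ~BIT(DM_KCOPYD_IGNORE_ERROR);   /* clear */
        if (flags & BIT(DM_KCOPYD_WRITE_SEQ))    /* test  */
                printf("write-seq requested, flags=0x%x\n", flags);

        return 0;
}
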
@@ -341,7 +341,7 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
 struct kcopyd_job {
         struct dm_kcopyd_client *kc;
         struct list_head list;
-        unsigned long flags;
+        unsigned flags;
 
         /*
          * Error state of the job.
@@ -418,7 +418,7 @@ static struct kcopyd_job *pop_io_job(struct list_head *jobs,
          * constraint and sequential writes that are at the right position.
          */
         list_for_each_entry(job, jobs, list) {
-                if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
+                if (job->rw == READ || !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
                         list_del(&job->list);
                         return job;
                 }
@@ -525,7 +525,7 @@ static void complete_io(unsigned long error, void *context)
                 else
                         job->read_err = 1;
 
-                if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+                if (!(job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))) {
                         push(&kc->complete_jobs, job);
                         wake(kc);
                         return;
@@ -565,7 +565,7 @@ static int run_io_job(struct kcopyd_job *job)
          * If we need to write sequentially and some reads or writes failed,
          * no point in continuing.
          */
-        if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
+        if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
             job->master_job->write_err) {
                 job->write_err = job->master_job->write_err;
                 return -EIO;
@@ -709,7 +709,7 @@ static void segment_complete(int read_err, unsigned long write_err,
          * Only dispatch more work if there hasn't been an error.
          */
         if ((!job->read_err && !job->write_err) ||
-            test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+            job->flags & BIT(DM_KCOPYD_IGNORE_ERROR)) {
                 /* get the next chunk of work */
                 progress = job->progress;
                 count = job->source.count - progress;
@@ -801,10 +801,10 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
          * we need to write sequentially. If one of the destination is a
          * host-aware device, then leave it to the caller to choose what to do.
          */
-        if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
+        if (!(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
                 for (i = 0; i < job->num_dests; i++) {
                         if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
-                                set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
+                                job->flags |= BIT(DM_KCOPYD_WRITE_SEQ);
                                 break;
                         }
                 }
@@ -813,9 +813,9 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
         /*
          * If we need to write sequentially, errors cannot be ignored.
          */
-        if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
-            test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
-                clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);
+        if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
+            job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))
+                job->flags &= ~BIT(DM_KCOPYD_IGNORE_ERROR);
 
         if (from) {
                 job->source = *from;
...
@@ -364,7 +364,7 @@ static void recover(struct mirror_set *ms, struct dm_region *reg)
 
         /* hand to kcopyd */
         if (!errors_handled(ms))
-                set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+                flags |= BIT(DM_KCOPYD_IGNORE_ERROR);
 
         dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
                        flags, recovery_complete, reg);
...
@@ -134,7 +134,7 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
         dst_zone_block = dmz_start_block(zmd, dst_zone);
 
         if (dmz_is_seq(dst_zone))
-                set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
+                flags |= BIT(DM_KCOPYD_WRITE_SEQ);
 
         while (block < end_block) {
                 if (src_zone->dev->flags & DMZ_BDEV_DYING)
...
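
Purely illustrative: a userspace sketch of the caller-side pattern after this change, as seen in the recover() and dmz_reclaim_copy() hunks above, where a flags word is built with plain bit-ORs and then passed along. The submit_copy() stub and the flag value are local stand-ins, not the real dm_kcopyd_copy() API.

/* Userspace sketch of the caller pattern shown in the hunks above.
 * submit_copy() is a stub standing in for dm_kcopyd_copy(); the flag
 * index is a local stand-in for the kernel definition. */
#include <stdio.h>

#define BIT(nr)             (1u << (nr))
#define DM_KCOPYD_WRITE_SEQ 1    /* stand-in value */

static void submit_copy(unsigned flags)
{
        printf("submitting copy job, flags=0x%x\n", flags);
}

int main(void)
{
        unsigned flags = 0;
        int dst_is_sequential_zone = 1;   /* pretend dmz_is_seq() returned true */

        if (dst_is_sequential_zone)
                flags |= BIT(DM_KCOPYD_WRITE_SEQ);   /* was: set_bit(DM_KCOPYD_WRITE_SEQ, &flags) */

        submit_copy(flags);
        return 0;
}
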