Commit 3eaa11e2 authored by Javier González, committed by Jens Axboe

lightnvm: pblk: control I/O flow also on tear down

When removing a pblk instance, control the write I/O flow to the
controller as we do in the fast path.
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0e2ff113
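
The patch replaces open-coded per-LUN semaphore handling with the pblk_down_page()/pblk_up_page() helpers, so recovery and tear-down throttle writes the same way the fast path does. Below is a minimal, illustrative sketch of the underlying pattern, not pblk code: the struct and function names (example_lun, example_lun_lock, example_lun_unlock) are placeholders; only down_timeout()/up() and the 30-second timeout come from the patch.

/* Illustrative only: the per-LUN write-semaphore pattern wrapped by
 * __pblk_down_page()/pblk_up_page(). "example_lun" is a placeholder
 * type, not part of pblk.
 */
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct example_lun {
	struct semaphore wr_sem;	/* bounds in-flight writes per LUN */
};

static int example_lun_lock(struct example_lun *lun)
{
	/* Wait until the LUN can accept another write, or give up. */
	int ret = down_timeout(&lun->wr_sem, msecs_to_jiffies(30000));

	if (ret == -ETIME)
		pr_err("example: LUN write semaphore timed out\n");
	return ret;
}

static void example_lun_unlock(struct example_lun *lun)
{
	/* Release the LUN when the write completes or fails to submit. */
	up(&lun->wr_sem);
}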
drivers/lightnvm/pblk-core.c

@@ -1670,13 +1670,10 @@ void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
 	queue_work(wq, &line_ws->ws);
 }
 
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
-		  unsigned long *lun_bitmap)
+static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
+			     int nr_ppas, int pos)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
-	struct pblk_lun *rlun;
-	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+	struct pblk_lun *rlun = &pblk->luns[pos];
 	int ret;
 
 	/*
@@ -1690,14 +1687,8 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
 				ppa_list[0].g.ch != ppa_list[i].g.ch);
 #endif
-	/* If the LUN has been locked for this same request, do no attempt to
-	 * lock it again
-	 */
-	if (test_and_set_bit(pos, lun_bitmap))
-		return;
 
-	rlun = &pblk->luns[pos];
-	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
+	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
 	if (ret) {
 		switch (ret) {
 		case -ETIME:
@@ -1710,6 +1701,50 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 	}
 }
 
+void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+
+	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+}
+
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+		  unsigned long *lun_bitmap)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+
+	/* If the LUN has been locked for this same request, do no attempt to
+	 * lock it again
+	 */
+	if (test_and_set_bit(pos, lun_bitmap))
+		return;
+
+	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+}
+
+void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct pblk_lun *rlun;
+	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+
+#ifdef CONFIG_NVM_DEBUG
+	int i;
+
+	for (i = 1; i < nr_ppas; i++)
+		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
+				ppa_list[0].g.ch != ppa_list[i].g.ch);
+#endif
+
+	rlun = &pblk->luns[pos];
+	up(&rlun->wr_sem);
+}
+
 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 		unsigned long *lun_bitmap)
 {
drivers/lightnvm/pblk-recovery.c

@@ -340,9 +340,14 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
 	struct pblk *pblk = pad_rq->pblk;
 	struct nvm_tgt_dev *dev = pblk->dev;
 
-	kref_put(&pad_rq->ref, pblk_recov_complete);
+	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+
+	bio_put(rqd->bio);
 	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 	pblk_free_rqd(pblk, rqd, WRITE);
+
+	atomic_dec(&pblk->inflight_io);
+	kref_put(&pad_rq->ref, pblk_recov_complete);
 }
 
 static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
@@ -385,7 +390,7 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	if (rq_ppas < pblk->min_write_pgs) {
 		pr_err("pblk: corrupted pad line %d\n", line->id);
-		goto free_rq;
+		goto fail_free_pad;
 	}
 
 	rq_len = rq_ppas * geo->sec_size;
@@ -393,7 +398,7 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
 	if (!meta_list) {
 		ret = -ENOMEM;
-		goto free_data;
+		goto fail_free_pad;
 	}
 
 	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
@@ -404,9 +409,9 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 		ret = PTR_ERR(rqd);
 		goto fail_free_meta;
 	}
-	memset(rqd, 0, pblk_w_rq_size);
 
-	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
+	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
+						PBLK_VMALLOC_META, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		ret = PTR_ERR(bio);
 		goto fail_free_rqd;
@@ -453,15 +458,15 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 	}
 
 	kref_get(&pad_rq->ref);
+	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
 
 	ret = pblk_submit_io(pblk, rqd);
 	if (ret) {
 		pr_err("pblk: I/O submission failed: %d\n", ret);
-		goto free_data;
+		pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+		goto fail_free_bio;
 	}
 
-	atomic_dec(&pblk->inflight_io);
-
 	left_line_ppas -= rq_ppas;
 	left_ppas -= rq_ppas;
 	if (left_ppas && left_line_ppas)
@@ -475,17 +480,23 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 		ret = -ETIME;
 	}
 
+	if (!pblk_line_is_full(line))
+		pr_err("pblk: corrupted padded line: %d\n", line->id);
+
+	vfree(data);
+
 free_rq:
 	kfree(pad_rq);
-free_data:
-	vfree(data);
 	return ret;
 
+fail_free_bio:
+	bio_put(bio);
 fail_free_rqd:
 	pblk_free_rqd(pblk, rqd, WRITE);
 fail_free_meta:
 	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
+fail_free_pad:
 	kfree(pad_rq);
+	vfree(data);
 	return ret;
 }
drivers/lightnvm/pblk-write.c

@@ -178,15 +178,12 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
 {
 	struct pblk *pblk = rqd->private;
 	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
 	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
 	struct pblk_line *line = m_ctx->private;
 	struct pblk_emeta *emeta = line->emeta;
-	int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
-	struct pblk_lun *rlun = &pblk->luns[pos];
 	int sync;
 
-	up(&rlun->wr_sem);
+	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
 
 	if (rqd->error) {
 		pblk_log_write_err(pblk, rqd);
@@ -203,6 +200,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
 				pblk->close_wq);
 
 	bio_put(rqd->bio);
+	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 	pblk_free_rqd(pblk, rqd, READ);
 
 	atomic_dec(&pblk->inflight_io);
@@ -367,7 +365,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_emeta *emeta = meta_line->emeta;
 	struct pblk_g_ctx *m_ctx;
-	struct pblk_lun *rlun;
 	struct bio *bio;
 	struct nvm_rq *rqd;
 	void *data;
@@ -411,13 +408,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 		rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
 	}
 
-	rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
-	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
-	if (ret) {
-		pr_err("pblk: lun semaphore timed out (%d)\n", ret);
-		goto fail_free_bio;
-	}
-
 	emeta->mem += rq_len;
 	if (emeta->mem >= lm->emeta_len[0]) {
 		spin_lock(&l_mg->close_lock);
@@ -427,6 +417,8 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 		spin_unlock(&l_mg->close_lock);
 	}
 
+	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+
 	ret = pblk_submit_io(pblk, rqd);
 	if (ret) {
 		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
@@ -436,10 +428,13 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	return NVM_IO_OK;
 
 fail_rollback:
+	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
 	spin_lock(&l_mg->close_lock);
 	pblk_dealloc_page(pblk, meta_line, rq_ppas);
 	list_add(&meta_line->list, &meta_line->list);
 	spin_unlock(&l_mg->close_lock);
+
+	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 fail_free_bio:
 	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
 		bio_put(bio);
drivers/lightnvm/pblk.h

@@ -739,8 +739,10 @@ u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 		   unsigned long secs_to_flush);
+void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 		  unsigned long *lun_bitmap);
+void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 		unsigned long *lun_bitmap);
 void pblk_end_bio_sync(struct bio *bio);
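
Taken together, the call sites above follow one ordering: take the per-LUN page semaphore before pblk_submit_io(), and release it either in the end_io callback or immediately when submission fails. A condensed sketch of that ordering follows; the wrapper function is hypothetical, while pblk_down_page(), pblk_up_page() and pblk_submit_io() are the pblk symbols touched by this patch.

/* Hypothetical wrapper illustrating the lock/submit/unlock ordering
 * used by the recovery and emeta paths in this patch.
 */
#include "pblk.h"

static int example_submit_with_lun_lock(struct pblk *pblk, struct nvm_rq *rqd)
{
	int ret;

	/* Throttle against the target LUN before issuing the write. */
	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		/* The request never reached the device: undo the lock here. */
		pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
		return ret;
	}

	/* On success, the end_io callback is expected to call pblk_up_page(). */
	return 0;
}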