Commit 19371d73 authored by Richard Weinberger

UBI: Fastmap: Ensure that only one fastmap work is scheduled

If the WL pool runs out of PEBs we schedule a fastmap write
to refill it as soon as possible.
Ensure that only one fastmap work is scheduled at a time, otherwise we
might end up in a fastmap write storm, because writing the fastmap can
schedule another write if bitflips are detected.
Signed-off-by: Richard Weinberger <richard@nod.at>
Reviewed-by: Tanya Brokhman <tlinder@codeaurora.org>
Reviewed-by: Guido Martínez <guido@vanguardiasur.com.ar>
parent ab6de685
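
For illustration only: the guard this commit adds boils down to a "schedule at most once" flag protected by a lock. Whoever finds the flag clear sets it and schedules the work; the worker clears it again once the fastmap has been written, so repeated requests while a write is pending cannot pile up. Below is a minimal, hedged userspace C sketch of that pattern using pthreads; the names (request_fastmap_update, fastmap_worker, work_lock, work_scheduled, update_fastmap) are hypothetical stand-ins that only mirror the kernel logic and are not actual UBI or kernel interfaces.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static int work_scheduled;	/* userspace analogue of ubi->fm_work_scheduled */

/* Stand-in for ubi_update_fastmap(): the slow, sleepable work. */
static void update_fastmap(void)
{
	printf("writing fastmap\n");
	sleep(1);
}

/* Analogue of update_fastmap_work_fn(): do the work, then clear the flag
 * under the lock so the next request may schedule the work again. */
static void *fastmap_worker(void *arg)
{
	(void)arg;
	update_fastmap();
	pthread_mutex_lock(&work_lock);
	work_scheduled = 0;
	pthread_mutex_unlock(&work_lock);
	return NULL;
}

/* Analogue of the guarded schedule_work() call: only the first caller that
 * finds the flag clear starts a worker; later callers see it set and back
 * off, so concurrent requests cannot cause a write storm. */
static void request_fastmap_update(void)
{
	pthread_mutex_lock(&work_lock);
	if (!work_scheduled) {
		work_scheduled = 1;
		pthread_mutex_unlock(&work_lock);

		pthread_t t;
		pthread_create(&t, NULL, fastmap_worker, NULL);
		pthread_detach(t);	/* fire and forget, like schedule_work() */
	} else {
		pthread_mutex_unlock(&work_lock);
	}
}

int main(void)
{
	/* Three back-to-back requests result in a single fastmap write. */
	request_fastmap_update();
	request_fastmap_update();
	request_fastmap_update();
	sleep(2);
	return 0;
}
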
@@ -431,6 +431,7 @@ struct ubi_debug_info {
  * @fm_size: fastmap size in bytes
  * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
  * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
  *
  * @used: RB-tree of used physical eraseblocks
  * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -442,7 +443,7 @@ struct ubi_debug_info {
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
  *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- *           @erroneous, and @erroneous_peb_count fields
+ *           @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: used to wait for all the scheduled works to finish and prevent
  *            new works from being submitted
@@ -537,6 +538,7 @@ struct ubi_device {
 	void *fm_buf;
 	size_t fm_size;
 	struct work_struct fm_work;
+	int fm_work_scheduled;

 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;

@@ -149,6 +149,9 @@ static void update_fastmap_work_fn(struct work_struct *wrk)
 {
 	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
 	ubi_update_fastmap(ubi);
+	spin_lock(&ubi->wl_lock);
+	ubi->fm_work_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
 }

 /**
@@ -657,7 +660,10 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 		/* We cannot update the fastmap here because this
 		 * function is called in atomic context.
 		 * Let's fail here and refill/update it as soon as possible. */
-		schedule_work(&ubi->fm_work);
+		if (!ubi->fm_work_scheduled) {
+			ubi->fm_work_scheduled = 1;
+			schedule_work(&ubi->fm_work);
+		}
 		return NULL;
 	} else {
 		pnum = pool->pebs[pool->used++];