
Commit 4b68bf9

Arne Edholm authored and Richard Weinberger committed
ubi: Select fastmap anchor PEBs considering wear level rules
There is a risk that the fastmap anchor PEB alternates between just two PEBs: the current anchor and the previous anchor that was just deleted. As the fastmap pools get the first take on free PEBs, the pools may leave no free PEBs to be selected as the new anchor, resulting in the two PEBs alternating. If the anchor PEBs get a high erase count, they will not be used by the pools but remain in ubi->free, further increasing the likelihood that they will be used as anchors. Getting stuck continuously using only a couple of PEBs results in uneven wear, eventually leading to failure.

To fix this:

- Choose the fastmap anchor when the most free PEBs are available. This is during rebuilding of the fastmap pools, after the unused pool PEBs are added to ubi->free but before the pools are populated again from the free PEBs. Also reserve an additional second-best PEB as a candidate for the next time the fastmap anchor is updated. If a better PEB is found at the next fastmap update, the candidate is made available for building the pools.

- Enable anchor moves within the anchor area again, as they are useful for distributing wear.

- The anchor candidate for the next fastmap update is the most suited free PEB. Check this PEB's erase count during wear leveling. If the wear-leveling limit is exceeded, the PEB is considered unsuitable for now. As all other unused anchor-area PEBs should be even worse, free up the used anchor-area PEB with the lowest erase count.

Signed-off-by: Arne Edholm <[email protected]>
Signed-off-by: Richard Weinberger <[email protected]>
1 parent 3d77e6a commit 4b68bf9
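For orientation, here is a minimal, self-contained C sketch of the selection idea described in the commit message: at pool-refill time every PEB is back on the free list, the two least-worn anchor-area PEBs are picked as the anchor and the next-anchor candidate, and the candidate is later rejected if its erase count runs too far ahead of the in-use anchor. The struct, the values of UBI_FM_MAX_START and UBI_WL_THRESHOLD, and the pick_anchor() helper are simplified stand-ins for illustration only; they are not the UBI implementation, which appears in the diffs below.

/* Hypothetical, self-contained sketch; NOT the kernel code.  The constants
 * below are assumed for illustration only.
 */
#include <stdio.h>
#include <stddef.h>

#define UBI_FM_MAX_START 64	/* anchor PEBs must live below this PEB number (assumed) */
#define UBI_WL_THRESHOLD 128	/* tolerated erase-count spread (assumed) */

struct peb {
	int pnum;	/* physical eraseblock number */
	int ec;		/* erase counter */
};

/* Return the free anchor-area PEB with the lowest erase count, or NULL. */
static struct peb *pick_anchor(struct peb *pebs, size_t n)
{
	struct peb *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (pebs[i].pnum < 0 || pebs[i].pnum >= UBI_FM_MAX_START)
			continue;	/* already claimed or outside the anchor area */
		if (!best || pebs[i].ec < best->ec)
			best = &pebs[i];
	}
	return best;
}

int main(void)
{
	/* Toy "free list": only PEBs 3, 7 and 12 lie in the anchor area. */
	struct peb pebs[] = {
		{ 3, 10 }, { 7, 250 }, { 12, 15 }, { 70, 5 }, { 81, 8 },
	};
	size_t n = sizeof(pebs) / sizeof(pebs[0]);
	int cur_anchor_ec = 10;	/* erase count of the anchor currently in use */

	/* Refill time: all PEBs are on the free list, so grab the two
	 * least-worn anchor-area PEBs: the anchor and the next candidate.
	 */
	struct peb *anchor = pick_anchor(pebs, n);
	if (anchor)
		anchor->pnum = -1;	/* mark as claimed */
	struct peb *next_anchor = pick_anchor(pebs, n);

	printf("anchor ec=%d, next candidate pnum=%d ec=%d\n",
	       anchor ? anchor->ec : -1,
	       next_anchor ? next_anchor->pnum : -1,
	       next_anchor ? next_anchor->ec : -1);

	/* Wear-leveling check: if the candidate is already UBI_WL_THRESHOLD
	 * erases ahead of the in-use anchor, drop it back to the free list
	 * and ask wear leveling to produce a fresher anchor PEB instead.
	 */
	if (next_anchor && next_anchor->ec - cur_anchor_ec >= UBI_WL_THRESHOLD)
		printf("candidate too worn, produce a new anchor PEB\n");

	return 0;
}

The real code instead calls ubi_wl_get_fm_peb() twice inside ubi_refill_pools() and performs the erase-count threshold check in wear_leveling_worker(), as shown in the fastmap-wl.c and wl.c hunks below.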

File tree

4 files changed: +57 / -25 lines changed


drivers/mtd/ubi/fastmap-wl.c

Lines changed: 24 additions & 15 deletions

@@ -116,6 +116,21 @@ void ubi_refill_pools(struct ubi_device *ubi)
 	wl_pool->size = 0;
 	pool->size = 0;
 
+	if (ubi->fm_anchor) {
+		wl_tree_add(ubi->fm_anchor, &ubi->free);
+		ubi->free_count++;
+	}
+	if (ubi->fm_next_anchor) {
+		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+		ubi->free_count++;
+	}
+
+	/* All available PEBs are in ubi->free, now is the time to get
+	 * the best anchor PEBs.
+	 */
+	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+
 	for (;;) {
 		enough = 0;
 		if (pool->size < pool->max_size) {

@@ -271,26 +286,20 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
 	struct ubi_work *wrk;
-	struct ubi_wl_entry *anchor;
 
 	spin_lock(&ubi->wl_lock);
 
-	/* Do we already have an anchor? */
-	if (ubi->fm_anchor) {
-		spin_unlock(&ubi->wl_lock);
-		return 0;
-	}
-
-	/* See if we can find an anchor PEB on the list of free PEBs */
-	anchor = ubi_wl_get_fm_peb(ubi, 1);
-	if (anchor) {
-		ubi->fm_anchor = anchor;
-		spin_unlock(&ubi->wl_lock);
-		return 0;
+	/* Do we have a next anchor? */
+	if (!ubi->fm_next_anchor) {
+		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+		if (!ubi->fm_next_anchor)
+			/* Tell wear leveling to produce a new anchor PEB */
+			ubi->fm_do_produce_anchor = 1;
 	}
 
-	/* No luck, trigger wear leveling to produce a new anchor PEB */
-	ubi->fm_do_produce_anchor = 1;
+	/* Do wear leveling to get a new anchor PEB or check the
+	 * existing next anchor candidate.
+	 */
 	if (ubi->wl_scheduled) {
 		spin_unlock(&ubi->wl_lock);
 		return 0;

drivers/mtd/ubi/fastmap.c

Lines changed: 11 additions & 0 deletions

@@ -1220,6 +1220,17 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		fm_pos += sizeof(*fec);
 		ubi_assert(fm_pos <= ubi->fm_size);
 	}
+	if (ubi->fm_next_anchor) {
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
+		set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
+		fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
+
+		free_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {

drivers/mtd/ubi/ubi.h

Lines changed: 3 additions & 1 deletion

@@ -491,7 +491,8 @@ struct ubi_debug_info {
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_anchor: The new anchor PEB used during fastmap update
+ * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
  *
  * @used: RB-tree of used physical eraseblocks

@@ -602,6 +603,7 @@ struct ubi_device {
 	int fm_work_scheduled;
 	int fast_attach;
 	struct ubi_wl_entry *fm_anchor;
+	struct ubi_wl_entry *fm_next_anchor;
 	int fm_do_produce_anchor;
 
 	/* Wear-leveling sub-system's stuff */

drivers/mtd/ubi/wl.c

Lines changed: 19 additions & 9 deletions

@@ -687,20 +687,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	}
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
+	e1 = find_anchor_wl_entry(&ubi->used);
+	if (e1 && ubi->fm_next_anchor &&
+	    (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+		ubi->fm_do_produce_anchor = 1;
+		/* fm_next_anchor is no longer considered a good anchor
+		 * candidate.
+		 * NULL assignment also prevents multiple wear level checks
+		 * of this PEB.
+		 */
+		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+		ubi->fm_next_anchor = NULL;
+		ubi->free_count++;
+	}
+
 	if (ubi->fm_do_produce_anchor) {
-		e1 = find_anchor_wl_entry(&ubi->used);
 		if (!e1)
 			goto out_cancel;
 		e2 = get_peb_for_wl(ubi);
 		if (!e2)
 			goto out_cancel;
 
-		/*
-		 * Anchor move within the anchor area is useless.
-		 */
-		if (e2->pnum < UBI_FM_MAX_START)
-			goto out_cancel;
-
 		self_check_in_wl_tree(ubi, e1, &ubi->used);
 		rb_erase(&e1->u.rb, &ubi->used);
 		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);

@@ -1079,8 +1086,11 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 	if (!err) {
 		spin_lock(&ubi->wl_lock);
 
-		if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
-			ubi->fm_anchor = e;
+		if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) {
+			/* Abort anchor production, if needed it will be
+			 * enabled again in the wear leveling started below.
+			 */
+			ubi->fm_next_anchor = e;
 			ubi->fm_do_produce_anchor = 0;
 		} else {
 			wl_tree_add(e, &ubi->free);
