Commit 62593cf4 authored by Brian Norris

mtd: spi-nor: refactor block protection functions

This code was a bit sloppy, would produce a lot of copy-and-paste, and
did not always provide a sensible interface:

 * It didn't validate the length for LOCK and the offset for UNLOCK, so
   we were essentially discarding half of the user-supplied data and
   assuming what they wanted to lock/unlock
 * It didn't do very good error checking
 * It didn't make use of the fact that this operation works on
   power-of-two dimensions

So, rewrite this to do proper bit arithmetic rather than a bunch of
hard-coded condition tables. Now we have:

 * More comments on how this was derived
 * Notes on what is (and isn't) supported
 * A more extensible function, so we could add support for other
   protection ranges
 * More accurate locking - e.g., suppose the top quadrant is locked (75%
   to 100%); then in the following cases, case (a) will succeed but (b)
   will not (return -EINVAL):
     (a) user requests lock 3rd quadrant (50% to 75%)
     (b) user requests lock 3rd quadrant, minus a few blocks (e.g., 50%
         to 73%)
   Case (b) *should* fail, since we'd have to lock blocks that weren't
   requested. But the old implementation didn't know the difference and
   would lock the entire second half (50% to 100%)

This refactoring work will also help enable the addition of
mtd_is_locked() support and potentially the support of bottom boot
protection (TB=1).
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
parent f8900258
...@@ -400,72 +400,153 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) ...@@ -400,72 +400,153 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
return ret; return ret;
} }
/*
 * Decode the ST Micro style BP{0,1,2} field of status register @sr into the
 * flash region it protects: @ofs/@len receive the start and length of the
 * locked range (always anchored at the top of the device; TB=0 only).
 */
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
				 uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u8 bp_mask = SR_BP2 | SR_BP1 | SR_BP0;
	int bp_shift = ffs(bp_mask) - 1;
	int inv_pow;

	if (!(sr & bp_mask)) {
		/* All BP bits clear: nothing is protected */
		*ofs = 0;
		*len = 0;
		return;
	}

	/*
	 * The BP field stores (mask - pow) where the locked length is
	 * size >> pow; recover pow and derive the top-anchored range.
	 */
	inv_pow = ((sr & bp_mask) ^ bp_mask) >> bp_shift;
	*len = mtd->size >> inv_pow;
	*ofs = mtd->size - *len;
}
/*
 * Return 1 if the entire region [ofs, ofs+len) is locked under status
 * register value @sr, 0 otherwise.
 */
static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			    u8 sr)
{
	loff_t prot_start;
	uint64_t prot_len;

	stm_get_locked_range(nor, sr, &prot_start, &prot_len);

	/* Region must lie entirely inside the protected range */
	if (ofs < prot_start)
		return 0;
	if (ofs + len > prot_start + prot_len)
		return 0;

	return 1;
}
/*
 * Lock a region of the flash. Compatible with ST Micro and similar flash.
 * Supports only the block protection bits BP{0,1,2} in the status register
 * (SR). Does not support these features found in newer SR bitfields:
 *  - TB: top/bottom protect - only handle TB=0 (top protect)
 *  - SEC: sector/block protect - only handle SEC=0 (block protect)
 *  - CMP: complement protect - only support CMP=0 (range is not complemented)
 *
 * Sample table portion for 8MB flash (Winbond w25q64fw):
 *
 *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
 *  --------------------------------------------------------------------------
 *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
 *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
 *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
 *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
 *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
 *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
 *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
 *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
 *
 * Returns negative on errors, 0 on success.
 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u8 status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;

	status_old = read_sr(nor);

	/* SPI NOR always locks to the end */
	if (ofs + len != mtd->size) {
		/* Does combined region extend to end? */
		if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
				      status_old))
			return -EINVAL;
		len = mtd->size - ofs;
	}

	/*
	 * Need smallest pow such that:
	 *
	 *   1 / (2^pow) <= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
	 */
	pow = ilog2(mtd->size) - ilog2(len);
	val = mask - (pow << shift);
	if (val & ~mask)
		return -EINVAL;
	/* Don't "lock" with no region! */
	if (!(val & mask))
		return -EINVAL;

	status_new = (status_old & ~mask) | val;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) <= (status_old & mask))
		return -EINVAL;

	write_enable(nor);
	return write_sr(nor, status_new);
}
/*
 * Unlock a region of the flash. See stm_lock() for more info
 *
 * Returns negative on errors, 0 on success.
 */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	uint8_t status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;

	status_old = read_sr(nor);

	/*
	 * Cannot unlock; would unlock larger region than requested.
	 *
	 * NOTE: the argument order here was wrong in the original commit
	 * (status_old was passed as @ofs); stm_is_locked_sr() takes
	 * (nor, ofs, len, sr). Check whether the erase block just below
	 * the requested region is locked: if it is, clearing BP bits
	 * (which always protect a top-anchored range) would unlock it too.
	 */
	if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
			     status_old))
		return -EINVAL;

	/*
	 * Need largest pow such that:
	 *
	 *   1 / (2^pow) >= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
	 */
	pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
	if (ofs + len == mtd->size) {
		val = 0; /* fully unlocked */
	} else {
		val = mask - (pow << shift);
		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask) | val;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) >= (status_old & mask))
		return -EINVAL;

	write_enable(nor);
	return write_sr(nor, status_new);
}
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment