Commit 5610789a authored by Linus Torvalds

Merge tag 'mtd/fixes-for-5.0-rc6' of git://git.infradead.org/linux-mtd

Pull mtd fixes from Boris Brezillon:

 - Fix a problem with the imx28 ECC engine

 - Remove a debug trace introduced in 2b6f0090 ("mtd: Check
   add_mtd_device() ret code")

 - Make sure partitions of size 0 can be registered

 - Fix kernel-doc warning in the rawnand core

 - Fix the error path of spinand_init() (missing manufacturer cleanup in
   a few places)

 - Address a problem with the SPI NAND PROGRAM LOAD operation, which does
   not work as expected on some parts (see the sketch after this list)
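
The last item is the most subtle one, so here is a small standalone sketch of
the idea: if PROGRAM LOAD is not guaranteed to reset the on-die cache to 0xFF,
the driver has to fill the whole cache (data + OOB) itself before loading it,
otherwise stale bytes can corrupt the bad block marker or previously written
OOB data. The geometry constants and the program_load() helper below are made
up for illustration; this is not the spinand driver code (the real change is
in the core.c hunks further down).

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical geometry and transfer helper, for illustration only. */
    #define NAND_PAGE_SIZE 2048
    #define NAND_OOB_SIZE  64

    int program_load(uint16_t column, const void *buf, size_t len);

    /*
     * Always start from an all-0xFF image of the full page + OOB area,
     * patch in what the caller wants to program, then load the cache in
     * one go instead of relying on PROGRAM LOAD to reset it to 0xFF.
     */
    static int write_page(const uint8_t *data, size_t datalen,
                          const uint8_t *oob, size_t ooblen)
    {
            static uint8_t cache[NAND_PAGE_SIZE + NAND_OOB_SIZE];

            memset(cache, 0xff, sizeof(cache));

            if (data && datalen <= NAND_PAGE_SIZE)
                    memcpy(cache, data, datalen);
            if (oob && ooblen <= NAND_OOB_SIZE)
                    memcpy(cache + NAND_PAGE_SIZE, oob, ooblen);

            /* One full-length load starting at column 0. */
            return program_load(0, cache, sizeof(cache));
    }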

* tag 'mtd/fixes-for-5.0-rc6' of git://git.infradead.org/linux-mtd:
  mtd: rawnand: gpmi: fix MX28 bus master lockup problem
  mtd: Make sure mtd->erasesize is valid even if the partition is of size 0
  mtd: Remove a debug trace in mtdpart.c
  mtd: rawnand: fix kernel-doc warnings
  mtd: spinand: Fix the error/cleanup path in spinand_init()
  mtd: spinand: Handle the case where PROGRAM LOAD does not reset the cache
parents 3e5e692f d5d27fd9
@@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
                 /* let's register it anyway to preserve ordering */
                 slave->offset = 0;
                 slave->mtd.size = 0;
+
+                /* Initialize ->erasesize to make add_mtd_device() happy. */
+                slave->mtd.erasesize = parent->erasesize;
+
                 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
                         part->name);
                 goto out_register;
@@ -632,7 +636,6 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
         mutex_unlock(&mtd_partitions_mutex);
 
         free_partition(new);
-        pr_info("%s:%i\n", __func__, __LINE__);
 
         return ret;
 }
......
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
         /*
          * Reset BCH here, too. We got failures otherwise :(
-         * See later BCH reset for explanation of MX23 handling
+         * See later BCH reset for explanation of MX23 and MX28 handling
          */
-        ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+        ret = gpmi_reset_block(r->bch_regs,
+                               GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
         if (ret)
                 goto err_out;
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
         /*
          * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
          * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
-         * On the other hand, the MX28 needs the reset, because one case has been
-         * seen where the BCH produced ECC errors constantly after 10000
-         * consecutive reboots. The latter case has not been seen on the MX23
-         * yet, still we don't know if it could happen there as well.
+         * and MX28.
          */
-        ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+        ret = gpmi_reset_block(r->bch_regs,
+                               GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
         if (ret)
                 goto err_out;
......
@@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip)
 /**
  * nand_fill_oob - [INTERN] Transfer client buffer to oob
  * @chip: NAND chip object
+ * @oob: oob data buffer
  * @len: oob data write length
  * @ops: oob ops structure
......
@@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
 /**
  * read_bbt - [GENERIC] Read the bad block table starting from page
- * @chip: NAND chip object
+ * @this: NAND chip object
  * @buf: temporary buffer
  * @page: the starting page
  * @num: the number of bbt descriptors to read
......
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
         struct nand_device *nand = spinand_to_nand(spinand);
         struct mtd_info *mtd = nanddev_to_mtd(nand);
         struct nand_page_io_req adjreq = *req;
-        unsigned int nbytes = 0;
-        void *buf = NULL;
+        void *buf = spinand->databuf;
+        unsigned int nbytes;
         u16 column = 0;
         int ret;
 
-        memset(spinand->databuf, 0xff,
-               nanddev_page_size(nand) +
-               nanddev_per_page_oobsize(nand));
+        /*
+         * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+         * the cache content to 0xFF (depends on vendor implementation), so we
+         * must fill the page cache entirely even if we only want to program
+         * the data portion of the page, otherwise we might corrupt the BBM or
+         * user data previously programmed in OOB area.
+         */
+        nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+        memset(spinand->databuf, 0xff, nbytes);
+        adjreq.dataoffs = 0;
+        adjreq.datalen = nanddev_page_size(nand);
+        adjreq.databuf.out = spinand->databuf;
+        adjreq.ooblen = nanddev_per_page_oobsize(nand);
+        adjreq.ooboffs = 0;
+        adjreq.oobbuf.out = spinand->oobbuf;
 
-        if (req->datalen) {
+        if (req->datalen)
                 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
                        req->datalen);
-                adjreq.dataoffs = 0;
-                adjreq.datalen = nanddev_page_size(nand);
-                adjreq.databuf.out = spinand->databuf;
-                nbytes = adjreq.datalen;
-                buf = spinand->databuf;
-        }
 
         if (req->ooblen) {
                 if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
                 else
                         memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
                                req->ooblen);
-
-                adjreq.ooblen = nanddev_per_page_oobsize(nand);
-                adjreq.ooboffs = 0;
-                nbytes += nanddev_per_page_oobsize(nand);
-                if (!buf) {
-                        buf = spinand->oobbuf;
-                        column = nanddev_page_size(nand);
-                }
         }
 
         spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
         /*
          * We need to use the RANDOM LOAD CACHE operation if there's
-         * more than one iteration, because the LOAD operation resets
-         * the cache to 0xff.
+         * more than one iteration, because the LOAD operation might
+         * reset the cache to 0xff.
          */
         if (nbytes) {
                 column = op.addr.val;
@@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand)
         for (i = 0; i < nand->memorg.ntargets; i++) {
                 ret = spinand_select_target(spinand, i);
                 if (ret)
-                        goto err_free_bufs;
+                        goto err_manuf_cleanup;
 
                 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
                 if (ret)
-                        goto err_free_bufs;
+                        goto err_manuf_cleanup;
         }
 
         ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
......
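
As a side note on the spinand_init() hunk above: it is the usual C goto cleanup
ladder, where any failure after the manufacturer-specific initialization has to
unwind that initialization before freeing the buffers, which is why the gotos
move from err_free_bufs to err_manuf_cleanup. Below is a minimal sketch of that
pattern with made-up helper names; it is not the real spinand API.

    struct ctx;

    int alloc_buffers(struct ctx *c);
    void free_buffers(struct ctx *c);
    int manufacturer_init(struct ctx *c);
    void manufacturer_cleanup(struct ctx *c);
    int unlock_blocks(struct ctx *c);

    int init_device(struct ctx *c)
    {
            int ret;

            ret = alloc_buffers(c);
            if (ret)
                    return ret;

            ret = manufacturer_init(c);
            if (ret)
                    goto err_free_bufs;

            /*
             * From here on, a failure must undo the manufacturer init too,
             * so it jumps to the later label in the ladder.
             */
            ret = unlock_blocks(c);
            if (ret)
                    goto err_manuf_cleanup;

            return 0;

    err_manuf_cleanup:
            manufacturer_cleanup(c);
    err_free_bufs:
            free_buffers(c);
            return ret;
    }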