Commit 8878b126 authored by Miquel Raynal, committed by Boris Brezillon

mtd: nand: add ->exec_op() implementation

Introduce a new interface to instruct NAND controllers to send specific
NAND operations. The new interface takes the form of a single method
called ->exec_op(). This method is designed to replace ->cmd_ctrl(),
->cmdfunc() and ->read/write_byte/word/buf() hooks.

->exec_op() is passed a set of instructions describing the operation
to execute. Each instruction has a type (ADDR, CMD, DATA, WAITRDY)
and a delay. The delay helps simple controllers wait long enough
between instructions; advanced controllers with integrated timings
control can ignore these delays.
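
As a minimal sketch, a complete RESET operation can be described with
two instructions and handed to the controller (the helper name and the
1 ms timeout below are illustrative, not part of this patch):

static int example_reset(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		/* Illustrative timeout; real callers derive it from tRST_max. */
		NAND_OP_WAIT_RDY(1, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	return nand_exec_op(chip, &op);
}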

Controllers that natively support complex operations (operations
formed of several instructions) can use the NAND op parser
infrastructure. This infrastructure allows controller drivers to
describe the sequence of instructions they support (called
nand_op_pattern) and a hook for each of these supported sequences. The
core then tries to find the best match for a given NAND operation, and
calls the associated hook.
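
For instance, a controller that can hypothetically issue at most 5
address cycles and transfer at most 512 bytes per step could describe a
read-like sequence this way, with example_exec_subop being its
(hypothetical) hook for that pattern; a sketch of such a hook appears
after the nand_subop helpers below:

static const struct nand_op_parser example_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		example_exec_subop,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));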

Various other helpers are also added to ease the writing of NAND
controller drivers.

This new interface should ease support of vendor specific operations
in that NAND manufacturer drivers now have a way to check if the
controller they are connected to supports a specific operation, and
complain or refuse to probe the NAND chip when that's not the case.
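
A minimal sketch of such a check, using the check_only argument of
->exec_op() to validate an operation without executing it (the helper
name is illustrative):

static bool example_supports_op(struct nand_chip *chip,
				const struct nand_operation *op)
{
	if (!chip->exec_op)
		return false;

	/* check_only=true: validate @op without touching the bus. */
	return chip->exec_op(chip, op, true) == 0;
}
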
Suggested-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Miquel Raynal <miquel.raynal@free-electrons.com>
Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
parent 707d8154
@@ -688,6 +688,66 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
} while (time_before(jiffies, timeo));
};
/**
* nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
* @chip: NAND chip structure
* @timeout_ms: Timeout in ms
*
* Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
* If that does not happen within the specified timeout, -ETIMEDOUT is
* returned.
*
* This helper is intended to be used when the controller does not have access
* to the NAND R/B pin.
*
* Be aware that calling this helper from an ->exec_op() implementation means
* ->exec_op() must be re-entrant.
*
* Returns 0 if the NAND chip is ready, a negative error code otherwise.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
u8 status = 0;
int ret;
if (!chip->exec_op)
return -ENOTSUPP;
ret = nand_status_op(chip, NULL);
if (ret)
return ret;
timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
do {
ret = nand_read_data_op(chip, &status, sizeof(status), true);
if (ret)
break;
if (status & NAND_STATUS_READY)
break;
/*
* Typical lowest execution time for a tR on most NANDs is 10us,
* use this as polling delay before doing something smarter (ie.
* deriving a delay from the timeout value, timeout_ms/ratio).
*/
udelay(10);
} while (time_before(jiffies, timeout_ms));
/*
* We have to exit READ_STATUS mode in order to read real data on the
* bus in case the WAITRDY instruction is preceding a DATA_IN
* instruction.
*/
nand_exit_status_op(chip);
if (ret)
return ret;
return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
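/*
 * Illustrative sketch (not part of this patch): a controller driver with no
 * R/B pin wired could service a WAITRDY instruction from its ->exec_op()
 * like this; the function name is hypothetical.
 */
static int example_handle_waitrdy(struct nand_chip *chip,
				  const struct nand_op_instr *instr)
{
	/* Poll the STATUS register via ->exec_op() instead of the R/B pin. */
	return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
}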
/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
@@ -1237,6 +1297,140 @@ static int nand_init_data_interface(struct nand_chip *chip)
return 0;
}
/**
* nand_fill_column_cycles - fill the column cycles of an address
* @chip: The NAND chip
* @addrs: Array of address cycles to fill
* @offset_in_page: The offset in the page
*
* Fills the first or the first two bytes of the @addrs field depending
* on the NAND bus width and the page size.
*
* Returns the number of cycles needed to encode the column, or a negative
* error code in case one of the arguments is invalid.
*/
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
unsigned int offset_in_page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/* Make sure the offset is within the page, OOB area included. */
if (offset_in_page > mtd->writesize + mtd->oobsize)
return -EINVAL;
/*
* On small page NANDs, there's a dedicated command to access the OOB
* area, and the column address is relative to the start of the OOB
* area, not the start of the page. Adjust the address accordingly.
*/
if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
offset_in_page -= mtd->writesize;
/*
* The offset in page is expressed in bytes; if the NAND bus is 16-bit
* wide, then it must be divided by 2.
*/
if (chip->options & NAND_BUSWIDTH_16) {
if (WARN_ON(offset_in_page % 2))
return -EINVAL;
offset_in_page /= 2;
}
addrs[0] = offset_in_page;
/*
* Small page NANDs use 1 cycle for the columns, while large page NANDs
* need 2
*/
if (mtd->writesize <= 512)
return 1;
addrs[1] = offset_in_page >> 8;
return 2;
}
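/*
 * Worked example: on an 8-bit bus with 2048-byte (large) pages,
 * offset_in_page = 2050 (0x802) is encoded as addrs[0] = 0x02 and
 * addrs[1] = 0x08, and the function returns 2 column cycles.
 */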
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[4];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
if (offset_in_page >= mtd->writesize)
instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
else if (offset_in_page >= 256 &&
!(chip->options & NAND_BUSWIDTH_16))
instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
addrs[1] = page;
addrs[2] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3) {
addrs[3] = page >> 16;
instrs[1].ctx.addr.naddrs++;
}
return nand_exec_op(chip, &op);
}
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len)
{
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[5];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_ADDR(4, addrs, 0),
NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
addrs[2] = page;
addrs[3] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3) {
addrs[4] = page >> 16;
instrs[1].ctx.addr.naddrs++;
}
return nand_exec_op(chip, &op);
}
/**
* nand_read_page_op - Do a READ PAGE operation
* @chip: The NAND chip
@@ -1261,6 +1455,16 @@ int nand_read_page_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
if (chip->exec_op) {
if (mtd->writesize > 512)
return nand_lp_exec_read_page_op(chip, page,
offset_in_page, buf,
len);
return nand_sp_exec_read_page_op(chip, page, offset_in_page,
buf, len);
}
chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
if (len)
chip->read_buf(mtd, buf, len);
@@ -1291,6 +1495,25 @@ static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
if (len && !buf)
return -EINVAL;
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_PARAM, 0),
NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
for (i = 0; i < len; i++)
p[i] = chip->read_byte(mtd);
@@ -1323,6 +1546,37 @@ int nand_change_read_column_op(struct nand_chip *chip,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
/* Small page NANDs do not support column change. */
if (mtd->writesize <= 512)
return -ENOTSUPP;
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[2] = {};
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
NAND_OP_ADDR(2, addrs, 0),
NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
PSEC_TO_NSEC(sdr->tCCS_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
instrs[3].ctx.data.force_8bit = force_8bit;
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
if (len)
chip->read_buf(mtd, buf, len);
@@ -1355,6 +1609,11 @@ int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
if (offset_in_oob + len > mtd->oobsize)
return -EINVAL;
if (chip->exec_op)
return nand_read_page_op(chip, page,
mtd->writesize + offset_in_oob,
buf, len);
chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
if (len)
chip->read_buf(mtd, buf, len);
@@ -1363,6 +1622,81 @@ int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool prog)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[5] = {};
struct nand_op_instr instrs[] = {
/*
* The first instruction will be dropped if we're dealing
* with a large page NAND and adjusted if we're dealing
* with a small page NAND and the page offset is > 255.
*/
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_CMD(NAND_CMD_SEQIN, 0),
NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_DATA_OUT(len, buf, 0),
NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
int ret;
u8 status;
if (naddrs < 0)
return naddrs;
addrs[naddrs++] = page;
addrs[naddrs++] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3)
addrs[naddrs++] = page >> 16;
instrs[2].ctx.addr.naddrs = naddrs;
/* Drop the last two instructions if we're not programming the page. */
if (!prog) {
op.ninstrs -= 2;
/* Also drop the DATA_OUT instruction if empty. */
if (!len)
op.ninstrs--;
}
if (mtd->writesize <= 512) {
/*
* Small pages need some more tweaking: we have to adjust the
* first instruction depending on the page offset we're trying
* to access.
*/
if (offset_in_page >= mtd->writesize)
instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
else if (offset_in_page >= 256 &&
!(chip->options & NAND_BUSWIDTH_16))
instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
} else {
/*
* Drop the first command if we're dealing with a large page
* NAND.
*/
op.instrs++;
op.ninstrs--;
}
ret = nand_exec_op(chip, &op);
if (!prog || ret)
return ret;
ret = nand_status_op(chip, &status);
if (ret)
return ret;
return status;
}
/**
* nand_prog_page_begin_op - starts a PROG PAGE operation
* @chip: The NAND chip
@@ -1388,6 +1722,10 @@ int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
if (chip->exec_op)
return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, false);
chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
if (buf)
@@ -1409,11 +1747,35 @@ EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
int nand_prog_page_end_op(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int status;
+ int ret;
+ u8 status;
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+ status = ret;
+ }
if (status & NAND_STATUS_FAIL)
return -EIO;
@@ -1447,11 +1809,16 @@ int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
- chip->write_buf(mtd, buf, len);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ if (chip->exec_op) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->write_buf(mtd, buf, len);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
if (status & NAND_STATUS_FAIL)
return -EIO;
@@ -1485,6 +1852,35 @@ int nand_change_write_column_op(struct nand_chip *chip,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
/* Small page NANDs do not support column change. */
if (mtd->writesize <= 512)
return -ENOTSUPP;
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[2];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RNDIN, 0),
NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
NAND_OP_DATA_OUT(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
instrs[2].ctx.data.force_8bit = force_8bit;
/* Drop the DATA_OUT instruction if len is set to 0. */
if (!len)
op.ninstrs--;
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
if (len)
chip->write_buf(mtd, buf, len);
@@ -1516,6 +1912,23 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
if (len && !buf)
return -EINVAL;
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READID, 0),
NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
for (i = 0; i < len; i++)
@@ -1540,6 +1953,22 @@ int nand_status_op(struct nand_chip *chip, u8 *status)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_STATUS,
PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_8BIT_DATA_IN(1, status, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
if (!status)
op.ninstrs--;
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
if (status)
*status = chip->read_byte(mtd);
@@ -1563,6 +1992,15 @@ int nand_exit_status_op(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (chip->exec_op) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
return 0;
@@ -1585,14 +2023,42 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int page = eraseblock <<
(chip->phys_erase_shift - chip->page_shift);
- int status;
+ int ret;
+ u8 status;
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
- status = chip->waitfunc(mtd, chip);
- if (status < 0)
- return status;
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_MSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
if (status & NAND_STATUS_FAIL)
return -EIO;
@@ -1618,13 +2084,40 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
{
struct mtd_info *mtd = nand_to_mtd(chip);
const u8 *params = data;
- int i, status;
+ int i, ret;
+ u8 status;
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, params[i]);
- status = chip->waitfunc(mtd, chip);
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
if (status & NAND_STATUS_FAIL)
return -EIO;
@@ -1650,6 +2143,22 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
u8 *params = data;
int i;
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
data, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
params[i] = chip->read_byte(mtd);
@@ -1671,6 +2180,18 @@ int nand_reset_op(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
return 0;
@@ -1698,6 +2219,17 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
if (!len || !buf)
return -EINVAL;
if (chip->exec_op) {
struct nand_op_instr instrs[] = {
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
instrs[0].ctx.data.force_8bit = force_8bit;
return nand_exec_op(chip, &op);
}
if (force_8bit) {
u8 *p = buf;
unsigned int i;
@@ -1733,6 +2265,17 @@ int nand_write_data_op(struct nand_chip *chip, const void *buf,
if (!len || !buf)
return -EINVAL;
if (chip->exec_op) {
struct nand_op_instr instrs[] = {
NAND_OP_DATA_OUT(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
instrs[0].ctx.data.force_8bit = force_8bit;
return nand_exec_op(chip, &op);
}
if (force_8bit) {
const u8 *p = buf;
unsigned int i;
@@ -1747,6 +2290,420 @@ int nand_write_data_op(struct nand_chip *chip, const void *buf,
}
EXPORT_SYMBOL_GPL(nand_write_data_op);
/**
* struct nand_op_parser_ctx - Context used by the parser
* @instrs: array of all the instructions that must be addressed
* @ninstrs: length of the @instrs array
* @subop: Sub-operation to be passed to the NAND controller
*
* This structure is used by the core to split NAND operations into
* sub-operations that can be handled by the NAND controller.
*/
struct nand_op_parser_ctx {
const struct nand_op_instr *instrs;
unsigned int ninstrs;
struct nand_subop subop;
};
/**
* nand_op_parser_must_split_instr - Checks if an instruction must be split
* @pat: the parser pattern element that matches @instr
* @instr: pointer to the instruction to check
* @start_offset: this is an in/out parameter. If @instr has already been
* split, then @start_offset is the offset from which to start
* (either an address cycle or an offset in the data buffer).
* Conversely, if the function returns true (ie. instr must be
* split), this parameter is updated to point to the first
* data/address cycle that has not been taken care of.
*
* Some NAND controllers are limited and cannot send X address cycles in a
* single operation, or cannot read/write more than Y bytes at the same time.
* In this case, split the instruction that does not fit in a single
* controller-operation into two or more chunks.
*
* Returns true if the instruction must be split, false otherwise.
* The @start_offset parameter is also updated to the offset at which the next
* bundle of instruction must start (if an address or a data instruction).
*/
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
const struct nand_op_instr *instr,
unsigned int *start_offset)
{
switch (pat->type) {
case NAND_OP_ADDR_INSTR:
if (!pat->addr.maxcycles)
break;
if (instr->ctx.addr.naddrs - *start_offset >
pat->addr.maxcycles) {
*start_offset += pat->addr.maxcycles;
return true;
}
break;
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
if (!pat->data.maxlen)
break;
if (instr->ctx.data.len - *start_offset > pat->data.maxlen) {
*start_offset += pat->data.maxlen;
return true;
}
break;
default:
break;
}
return false;
}
/**
* nand_op_parser_match_pat - Checks if a pattern matches the instructions
* remaining in the parser context
* @pat: the pattern to test
* @ctx: the parser context structure to match with the pattern @pat
*
* Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
* Returns true if this is the case, false otherwise. When true is returned,
* @ctx->subop is updated with the set of instructions to be passed to the
* controller driver.
*/
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
struct nand_op_parser_ctx *ctx)
{
unsigned int instr_offset = ctx->subop.first_instr_start_off;
const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
const struct nand_op_instr *instr = ctx->subop.instrs;
unsigned int i, ninstrs;
for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
/*
* The pattern instruction does not match the operation
* instruction. If the instruction is marked optional in the
* pattern definition, we skip the pattern element and continue
* to the next one. If the element is mandatory, there's no
* match and we can return false directly.
*/
if (instr->type != pat->elems[i].type) {
if (!pat->elems[i].optional)
return false;
continue;
}
/*
* Now check the pattern element constraints. If the pattern is
* not able to handle the whole instruction in a single step,
* we have to split it.
* The last_instr_end_off value comes back updated to point to
* the position where we have to split the instruction (the
* start of the next subop chunk).
*/
if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
&instr_offset)) {
ninstrs++;
i++;
break;
}
instr++;
ninstrs++;
instr_offset = 0;
}
/*
* This can happen if all instructions of a pattern are optional.
* Still, if there's not at least one instruction handled by this
* pattern, this is not a match, and we should try the next one (if
* any).
*/
if (!ninstrs)
return false;
/*
* We had a match on the pattern head, but the pattern may be longer
* than the instructions we're asked to execute. We need to make sure
* there are no mandatory elements in the pattern tail.
*/
for (; i < pat->nelems; i++) {
if (!pat->elems[i].optional)
return false;
}
/*
* We have a match: update the subop structure accordingly and return
* true.
*/
ctx->subop.ninstrs = ninstrs;
ctx->subop.last_instr_end_off = instr_offset;
return true;
}
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
const struct nand_op_instr *instr;
char *prefix = " ";
unsigned int i;
pr_debug("executing subop:\n");
for (i = 0; i < ctx->ninstrs; i++) {
instr = &ctx->instrs[i];
if (instr == &ctx->subop.instrs[0])
prefix = " ->";
switch (instr->type) {
case NAND_OP_CMD_INSTR:
pr_debug("%sCMD [0x%02x]\n", prefix,
instr->ctx.cmd.opcode);
break;
case NAND_OP_ADDR_INSTR:
pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
instr->ctx.addr.naddrs,
instr->ctx.addr.naddrs < 64 ?
instr->ctx.addr.naddrs : 64,
instr->ctx.addr.addrs);
break;
case NAND_OP_DATA_IN_INSTR:
pr_debug("%sDATA_IN [%d B%s]\n", prefix,
instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");
break;
case NAND_OP_DATA_OUT_INSTR:
pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");
break;
case NAND_OP_WAITRDY_INSTR:
pr_debug("%sWAITRDY [max %d ms]\n", prefix,
instr->ctx.waitrdy.timeout_ms);
break;
}
if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
prefix = " ";
}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
/* NOP */
}
#endif
/**
* nand_op_parser_exec_op - exec_op parser
* @chip: the NAND chip
* @parser: patterns description provided by the controller driver
* @op: the NAND operation to address
* @check_only: when true, the function only checks if @op can be handled but
* does not execute the operation
*
* Helper function designed to ease integration of NAND controller drivers that
* only support a limited set of instruction sequences. The supported sequences
* are described in @parser, and the framework takes care of splitting @op into
* multiple sub-operations (if required) and passing them back to the ->exec()
* callback of the matching pattern if @check_only is set to false.
*
* NAND controller drivers should call this function from their own ->exec_op()
* implementation.
*
* Returns 0 on success, a negative error code otherwise. A failure can be
* caused by an unsupported operation (none of the supported patterns is able
* to handle the requested operation), or an error returned by one of the
* matching pattern->exec() hook.
*/
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only)
{
struct nand_op_parser_ctx ctx = {
.subop.instrs = op->instrs,
.instrs = op->instrs,
.ninstrs = op->ninstrs,
};
unsigned int i;
while (ctx.subop.instrs < op->instrs + op->ninstrs) {
int ret;
for (i = 0; i < parser->npatterns; i++) {
const struct nand_op_parser_pattern *pattern;
pattern = &parser->patterns[i];
if (!nand_op_parser_match_pat(pattern, &ctx))
continue;
nand_op_parser_trace(&ctx);
if (check_only)
break;
ret = pattern->exec(chip, &ctx.subop);
if (ret)
return ret;
break;
}
if (i == parser->npatterns) {
pr_debug("->exec_op() parser: pattern not found!\n");
return -ENOTSUPP;
}
/*
* Update the context structure by pointing to the start of the
* next subop.
*/
ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
if (ctx.subop.last_instr_end_off)
ctx.subop.instrs -= 1;
ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
}
return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
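/*
 * Illustrative sketch (not part of this patch): with a parser declared via
 * NAND_OP_PARSER() (like the hypothetical example_parser in the commit
 * message above), a controller driver's ->exec_op() reduces to:
 */
static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	/* Let the core split @op against the declared patterns. */
	return nand_op_parser_exec_op(chip, &example_parser, op, check_only);
}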
static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
instr->type == NAND_OP_DATA_OUT_INSTR);
}
static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
unsigned int instr_idx)
{
return subop && instr_idx < subop->ninstrs;
}
static int nand_subop_get_start_off(const struct nand_subop *subop,
unsigned int instr_idx)
{
if (instr_idx)
return 0;
return subop->first_instr_start_off;
}
/**
* nand_subop_get_addr_start_off - Get the start offset in an address array
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
* ->addr.addrs field of address instructions. This is wrong as address
* instructions might be split.
*
* Given an address instruction, returns the offset of the first cycle to issue.
*/
int nand_subop_get_addr_start_off(const struct nand_subop *subop,
unsigned int instr_idx)
{
if (!nand_subop_instr_is_valid(subop, instr_idx) ||
subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
return -EINVAL;
return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
/**
* nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
* ->addr->naddrs field of an address instruction. This is wrong as
* instructions might be split.
*
* Given an address instruction, returns the number of address cycles to issue.
*/
int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
unsigned int instr_idx)
{
int start_off, end_off;
if (!nand_subop_instr_is_valid(subop, instr_idx) ||
subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
return -EINVAL;
start_off = nand_subop_get_addr_start_off(subop, instr_idx);
if (instr_idx == subop->ninstrs - 1 &&
subop->last_instr_end_off)
end_off = subop->last_instr_end_off;
else
end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
/**
* nand_subop_get_data_start_off - Get the start offset in a data array
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
* ->data->buf.{in,out} field of data instructions. This is wrong as data
* instructions might be split.
*
* Given a data instruction, returns the offset to start from.
*/
int nand_subop_get_data_start_off(const struct nand_subop *subop,
unsigned int instr_idx)
{
if (!nand_subop_instr_is_valid(subop, instr_idx) ||
!nand_instr_is_data(&subop->instrs[instr_idx]))
return -EINVAL;
return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
/**
* nand_subop_get_data_len - Get the number of bytes to retrieve
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
* ->data->len field of a data instruction. This is wrong as data instructions
* might be split.
*
* Returns the length of the chunk of data to send/receive.
*/
int nand_subop_get_data_len(const struct nand_subop *subop,
unsigned int instr_idx)
{
int start_off = 0, end_off;
if (!nand_subop_instr_is_valid(subop, instr_idx) ||
!nand_instr_is_data(&subop->instrs[instr_idx]))
return -EINVAL;
start_off = nand_subop_get_data_start_off(subop, instr_idx);
if (instr_idx == subop->ninstrs - 1 &&
subop->last_instr_end_off)
end_off = subop->last_instr_end_off;
else
end_off = subop->instrs[instr_idx].ctx.data.len;
return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
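/*
 * Illustrative sketch (not part of this patch): a pattern ->exec() hook
 * should use the accessors above rather than dereferencing the instruction
 * context directly, since instructions may have been split. The function
 * name is hypothetical and matches the example_parser shown earlier.
 */
static int example_exec_subop(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	unsigned int i;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			/* Issue instr->ctx.cmd.opcode on the bus. */
			break;
		case NAND_OP_ADDR_INSTR: {
			int first = nand_subop_get_addr_start_off(subop, i);
			int ncycles = nand_subop_get_num_addr_cyc(subop, i);

			/* Issue ncycles cycles from &ctx.addr.addrs[first]. */
			pr_debug("ADDR: %d cycles from offset %d\n",
				 ncycles, first);
			break;
		}
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR: {
			int first = nand_subop_get_data_start_off(subop, i);
			int len = nand_subop_get_data_len(subop, i);

			/* Transfer len bytes starting at offset first. */
			pr_debug("DATA: %d bytes at offset %d\n", len, first);
			break;
		}
		case NAND_OP_WAITRDY_INSTR:
			/* Wait at most instr->ctx.waitrdy.timeout_ms. */
			break;
		}
	}

	return 0;
}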
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
@@ -4002,7 +4959,7 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->chip_delay = 20;
/* check, if a user supplied command function given */
- if (chip->cmdfunc == NULL)
+ if (!chip->cmdfunc && !chip->exec_op)
chip->cmdfunc = nand_command;
/* check, if a user supplied wait function given */
@@ -4894,15 +5851,21 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
- /*
- * Default functions assigned for chip_select() and
- * cmdfunc() both expect cmd_ctrl() to be populated,
- * so we need to check that that's the case
- */
- pr_err("chip.cmd_ctrl() callback is not provided");
- return -EINVAL;
+ /*
+ * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
+ * populated.
+ */
+ if (!chip->exec_op) {
+ /*
+ * Default functions assigned for ->cmdfunc() and
+ * ->select_chip() both expect ->cmd_ctrl() to be populated.
+ */
+ if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ pr_err("->cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
}
/* Set the default functions */
nand_set_defaults(chip);
...
@@ -81,6 +81,15 @@ static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (chip->exec_op) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(cmd, 0),
};
struct nand_operation op = NAND_OPERATION(instrs);
return nand_exec_op(chip, &op);
}
chip->cmdfunc(mtd, cmd, -1, -1);
return 0;
...
@@ -734,6 +734,350 @@ struct nand_manufacturer_ops {
void (*cleanup)(struct nand_chip *chip);
};
/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
struct nand_op_cmd_instr {
u8 opcode;
};
/**
* struct nand_op_addr_instr - Definition of an address instruction
* @naddrs: length of the @addrs array
* @addrs: array containing the address cycles to issue
*/
struct nand_op_addr_instr {
unsigned int naddrs;
const u8 *addrs;
};
/**
* struct nand_op_data_instr - Definition of a data instruction
* @len: number of data bytes to move
* @in: buffer to fill when reading from the NAND chip
* @out: buffer to read from when writing to the NAND chip
* @force_8bit: force 8-bit access
*
* Please note that "in" and "out" are inverted from the ONFI specification
* and are from the controller perspective, so an "in" is a read from the NAND
* chip while an "out" is a write to the NAND chip.
*/
struct nand_op_data_instr {
unsigned int len;
union {
void *in;
const void *out;
} buf;
bool force_8bit;
};
/**
* struct nand_op_waitrdy_instr - Definition of a wait ready instruction
* @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
*/
struct nand_op_waitrdy_instr {
unsigned int timeout_ms;
};
/**
* enum nand_op_instr_type - Definition of all instruction types
* @NAND_OP_CMD_INSTR: command instruction
* @NAND_OP_ADDR_INSTR: address instruction
* @NAND_OP_DATA_IN_INSTR: data in instruction
* @NAND_OP_DATA_OUT_INSTR: data out instruction
* @NAND_OP_WAITRDY_INSTR: wait ready instruction
*/
enum nand_op_instr_type {
NAND_OP_CMD_INSTR,
NAND_OP_ADDR_INSTR,
NAND_OP_DATA_IN_INSTR,
NAND_OP_DATA_OUT_INSTR,
NAND_OP_WAITRDY_INSTR,
};
/**
* struct nand_op_instr - Instruction object
* @type: the instruction type
* @cmd/@addr/@data/@waitrdy: extra data associated with the instruction.
* You'll have to use the appropriate element
* depending on @type
* @delay_ns: delay the controller should apply after the instruction has been
* issued on the bus. Most modern controllers have internal timings
* control logic, and in this case, the controller driver can ignore
* this field.
*/
struct nand_op_instr {
enum nand_op_instr_type type;
union {
struct nand_op_cmd_instr cmd;
struct nand_op_addr_instr addr;
struct nand_op_data_instr data;
struct nand_op_waitrdy_instr waitrdy;
} ctx;
unsigned int delay_ns;
};
/*
* Special handling must be done for the WAITRDY timeout parameter as it usually
* is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
* tBERS (during an erase), all of which are u64 values that cannot be
* divided by usual kernel macros and must be handled with the special
* DIV_ROUND_UP_ULL() macro.
*/
#define __DIVIDE(dividend, divisor) ({ \
sizeof(dividend) == sizeof(u32) ? \
DIV_ROUND_UP(dividend, divisor) : \
DIV_ROUND_UP_ULL(dividend, divisor); \
})
#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
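/*
 * For instance, with a typical tBERS_max of 10,000,000,000 ps (a u64),
 * PSEC_TO_MSEC() yields DIV_ROUND_UP_ULL(10000000000, 1000000000) = 10 ms,
 * while a typical tADL_min of 400,000 ps (a u32) gives
 * PSEC_TO_NSEC() = DIV_ROUND_UP(400000, 1000) = 400 ns.
 */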
#define NAND_OP_CMD(id, ns) \
{ \
.type = NAND_OP_CMD_INSTR, \
.ctx.cmd.opcode = id, \
.delay_ns = ns, \
}
#define NAND_OP_ADDR(ncycles, cycles, ns) \
{ \
.type = NAND_OP_ADDR_INSTR, \
.ctx.addr = { \
.naddrs = ncycles, \
.addrs = cycles, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_DATA_IN(l, b, ns) \
{ \
.type = NAND_OP_DATA_IN_INSTR, \
.ctx.data = { \
.len = l, \
.buf.in = b, \
.force_8bit = false, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_DATA_OUT(l, b, ns) \
{ \
.type = NAND_OP_DATA_OUT_INSTR, \
.ctx.data = { \
.len = l, \
.buf.out = b, \
.force_8bit = false, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
{ \
.type = NAND_OP_DATA_IN_INSTR, \
.ctx.data = { \
.len = l, \
.buf.in = b, \
.force_8bit = true, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
{ \
.type = NAND_OP_DATA_OUT_INSTR, \
.ctx.data = { \
.len = l, \
.buf.out = b, \
.force_8bit = true, \
}, \
.delay_ns = ns, \
}
#define NAND_OP_WAIT_RDY(tout_ms, ns) \
{ \
.type = NAND_OP_WAITRDY_INSTR, \
.ctx.waitrdy.timeout_ms = tout_ms, \
.delay_ns = ns, \
}
/**
* struct nand_subop - a sub operation
* @instrs: array of instructions
* @ninstrs: length of the @instrs array
* @first_instr_start_off: offset to start from for the first instruction
* of the sub-operation
* @last_instr_end_off: offset to end at (excluded) for the last instruction
* of the sub-operation
*
* Both @first_instr_start_off and @last_instr_end_off only apply to data or
* address instructions.
*
* When an operation cannot be handled as is by the NAND controller, it will
* be split by the parser into sub-operations which will be passed to the
* controller driver.
*/
struct nand_subop {
const struct nand_op_instr *instrs;
unsigned int ninstrs;
unsigned int first_instr_start_off;
unsigned int last_instr_end_off;
};
int nand_subop_get_addr_start_off(const struct nand_subop *subop,
unsigned int op_id);
int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
unsigned int op_id);
int nand_subop_get_data_start_off(const struct nand_subop *subop,
unsigned int op_id);
int nand_subop_get_data_len(const struct nand_subop *subop,
unsigned int op_id);
/**
* struct nand_op_parser_addr_constraints - Constraints for address instructions
* @maxcycles: maximum number of address cycles the controller can issue in a
* single step
*/
struct nand_op_parser_addr_constraints {
unsigned int maxcycles;
};
/**
* struct nand_op_parser_data_constraints - Constraints for data instructions
* @maxlen: maximum data length that the controller can handle in a single step
*/
struct nand_op_parser_data_constraints {
unsigned int maxlen;
};
/**
* struct nand_op_parser_pattern_elem - One element of a pattern
* @type: the instruction type
* @optional: whether this element of the pattern is optional or mandatory
* @addr/@data: address or data constraint (number of cycles or data length)
*/
struct nand_op_parser_pattern_elem {
enum nand_op_instr_type type;
bool optional;
union {
struct nand_op_parser_addr_constraints addr;
struct nand_op_parser_data_constraints data;
};
};
#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
{ \
.type = NAND_OP_CMD_INSTR, \
.optional = _opt, \
}
#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
{ \
.type = NAND_OP_ADDR_INSTR, \
.optional = _opt, \
.addr.maxcycles = _maxcycles, \
}
#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
{ \
.type = NAND_OP_DATA_IN_INSTR, \
.optional = _opt, \
.data.maxlen = _maxlen, \
}
#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
{ \
.type = NAND_OP_DATA_OUT_INSTR, \
.optional = _opt, \
.data.maxlen = _maxlen, \
}
#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
{ \
.type = NAND_OP_WAITRDY_INSTR, \
.optional = _opt, \
}
/**
* struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
* @elems: array of pattern elements
* @nelems: number of pattern elements in @elems array
* @exec: the function that will issue a sub-operation
*
* A pattern is a list of elements, each element representing one instruction
* with its constraints. The pattern itself is used by the core to match NAND
* chip operations with NAND controller operations.
* Once a match between a NAND controller operation pattern and a NAND chip
* operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
* hook is called so that the controller driver can issue the operation on the
* bus.
*
* Controller drivers should declare as many patterns as they support and pass
* this list of patterns (created with the help of the following macro) to
* the nand_op_parser_exec_op() helper.
*/
struct nand_op_parser_pattern {
const struct nand_op_parser_pattern_elem *elems;
unsigned int nelems;
int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
};
#define NAND_OP_PARSER_PATTERN(_exec, ...) \
{ \
.exec = _exec, \
.elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
.nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
sizeof(struct nand_op_parser_pattern_elem), \
}
/**
* struct nand_op_parser - NAND controller operation parser descriptor
* @patterns: array of supported patterns
* @npatterns: length of the @patterns array
*
* The parser descriptor is just an array of supported patterns which will be
* iterated by nand_op_parser_exec_op() every time it tries to execute a
* NAND operation (or tries to determine if a specific operation is supported).
*
* It is worth mentioning that patterns will be tested in their declaration
* order, and the first match will be taken, so it's important to order patterns
* appropriately so that simple/inefficient patterns are placed at the end of
* the list. Usually, this is where you put single instruction patterns.
*/
struct nand_op_parser {
const struct nand_op_parser_pattern *patterns;
unsigned int npatterns;
};
#define NAND_OP_PARSER(...) \
{ \
.patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
.npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
sizeof(struct nand_op_parser_pattern), \
}
/**
* struct nand_operation - NAND operation descriptor
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
* The actual operation structure that will be passed to chip->exec_op().
*/
struct nand_operation {
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
#define NAND_OPERATION(_instrs) \
{ \
.instrs = _instrs, \
.ninstrs = ARRAY_SIZE(_instrs), \
}
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
@@ -760,6 +1104,10 @@ struct nand_manufacturer_ops {
* commands to the chip.
* @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
* ready.
* @exec_op: controller specific method to execute NAND operations.
* This method replaces ->cmdfunc(),
* ->{read,write}_{buf,byte,word}(), ->dev_ready() and
* ->waitfunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
@@ -859,6 +1207,9 @@ struct nand_chip {
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
int page_addr);
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
int (*exec_op)(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -869,7 +1220,6 @@ struct nand_chip {
int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
-
int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -929,6 +1279,15 @@ struct nand_chip {
} manufacturer;
};
static inline int nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op)
{
if (!chip->exec_op)
return -ENOTSUPP;
return chip->exec_op(chip, op, false);
}
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1320,4 +1679,11 @@ void nand_cleanup(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
#endif /* __LINUX_MTD_RAWNAND_H */