Commit 1fa4aad4 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6

parents 9fb1759a 19870da7
nand_base.c

@@ -59,7 +59,7 @@
  * The AG-AND chips have nice features for speed improvement,
  * which are not supported yet. Read / program 4 pages in one go.
  *
- * $Id: nand_base.c,v 1.146 2005/06/17 15:02:06 gleixner Exp $
+ * $Id: nand_base.c,v 1.147 2005/07/15 07:18:06 gleixner Exp $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1410,16 +1410,6 @@ static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t
         this->read_buf(mtd, &buf[i], thislen);
         i += thislen;

-        /* Apply delay or wait for ready/busy pin
-         * Do this before the AUTOINCR check, so no problems
-         * arise if a chip which does auto increment
-         * is marked as NOAUTOINCR by the board driver.
-         */
-        if (!this->dev_ready)
-            udelay (this->chip_delay);
-        else
-            nand_wait_ready(mtd);
-
         /* Read more ? */
         if (i < len) {
             page++;
@@ -1432,6 +1422,16 @@ static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t
                 this->select_chip(mtd, chipnr);
             }

+            /* Apply delay or wait for ready/busy pin
+             * Do this before the AUTOINCR check, so no problems
+             * arise if a chip which does auto increment
+             * is marked as NOAUTOINCR by the board driver.
+             */
+            if (!this->dev_ready)
+                udelay (this->chip_delay);
+            else
+                nand_wait_ready(mtd);
+
             /* Check, if the chip supports auto page increment
              * or if we have hit a block boundary.
              */
...
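Taken together, the two nand_read_oob() hunks relocate the ready/busy wait: it used to run right after the OOB bytes were copied out, before the "Read more ?" test, and it now runs only when another page will actually be read, after the next chip has been (re)selected and just before the auto-increment decision. Below is a small self-contained model of the reordered control flow; the stub functions and the struct are invented for illustration, and only dev_ready, chip_delay and nand_wait_ready() correspond to names that appear in the diff.

/* Illustrative model only -- not the real nand_base.c loop. */
#include <stdbool.h>
#include <stdio.h>

struct chip_model { bool has_dev_ready; int chip_delay; };

static void udelay_stub(int us)           { printf("udelay(%d)\n", us); }
static void nand_wait_ready_stub(void)    { printf("wait on ready/busy pin\n"); }
static void select_next_chip_stub(void)   { printf("reselect chip\n"); }
static void read_oob_chunk_stub(int page) { printf("read OOB of page %d\n", page); }

static void read_oob_model(struct chip_model *chip, int pages)
{
    int page = 0;

    for (;;) {
        read_oob_chunk_stub(page);
        if (++page >= pages)            /* "Read more ?" */
            break;

        select_next_chip_stub();        /* possible chip-boundary crossing */

        /* After this patch the wait sits here: after any reselect and
         * before the AUTOINCR / block-boundary decision for the next page
         * (previously it ran before the "Read more ?" check). */
        if (!chip->has_dev_ready)
            udelay_stub(chip->chip_delay);
        else
            nand_wait_ready_stub();
    }
}

int main(void)
{
    struct chip_model c = { .has_dev_ready = false, .chip_delay = 20 };

    read_oob_model(&c, 3);
    return 0;
}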
nand_bbt.c

@@ -6,7 +6,7 @@
  *
  * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
  *
- * $Id: nand_bbt.c,v 1.33 2005/06/14 15:47:56 gleixner Exp $
+ * $Id: nand_bbt.c,v 1.35 2005/07/15 13:53:47 gleixner Exp $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -109,24 +109,21 @@ static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_des
 /**
  * check_short_pattern - [GENERIC] check if a pattern is in the buffer
  * @buf: the buffer to search
- * @len: the length of buffer to search
- * @paglen: the pagelength
  * @td: search pattern descriptor
  *
  * Check for a pattern at the given place. Used to search bad block
  * tables and good / bad block identifiers. Same as check_pattern, but
- * no optional empty check and the pattern is expected to start
- * at offset 0.
+ * no optional empty check
  *
  */
-static int check_short_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+static int check_short_pattern (uint8_t *buf, struct nand_bbt_descr *td)
 {
     int i;
     uint8_t *p = buf;

     /* Compare the pattern */
     for (i = 0; i < td->len; i++) {
-        if (p[i] != td->pattern[i])
+        if (p[td->offs + i] != td->pattern[i])
             return -1;
     }
     return 0;
@@ -337,13 +334,14 @@ static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
         if (!(bd->options & NAND_BBT_SCANEMPTY)) {
             size_t retlen;

-            /* No need to read pages fully, just read required OOB bytes */
-            ret = mtd->read_oob(mtd, from + j * mtd->oobblock + bd->offs,
-                        readlen, &retlen, &buf[0]);
+            /* Read the full oob until read_oob is fixed to
+             * handle single byte reads for 16 bit buswidth */
+            ret = mtd->read_oob(mtd, from + j * mtd->oobblock,
+                        mtd->oobsize, &retlen, buf);
             if (ret)
                 return ret;

-            if (check_short_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) {
+            if (check_short_pattern (buf, bd)) {
                 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
                 printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
                     i >> 1, (unsigned int) from);
...
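The net effect of the nand_bbt.c hunks: create_bbt() now reads the whole OOB area of the page, and check_short_pattern() applies the descriptor's offs itself, so callers no longer pre-offset the buffer. Below is a self-contained model of that calling convention; the struct mirrors only the nand_bbt_descr fields the helper touches, and the concrete values (marker byte at OOB offset 5, a one-byte 0xff pattern) are a typical small-page NAND layout used purely as an example, not copied from this patch.

/* Illustrative model only -- mirrors the new helper semantics. */
#include <stdint.h>
#include <stdio.h>

struct bbt_descr_model {
    int offs;           /* marker offset within the OOB area */
    int len;            /* pattern length compared by the helper */
    uint8_t *pattern;   /* expected "good block" marker bytes */
};

static int check_short_pattern_model(uint8_t *buf, struct bbt_descr_model *td)
{
    int i;

    /* Compare the pattern at td->offs inside the raw OOB buffer */
    for (i = 0; i < td->len; i++) {
        if (buf[td->offs + i] != td->pattern[i])
            return -1;      /* mismatch: treat the eraseblock as bad */
    }
    return 0;
}

int main(void)
{
    uint8_t good_oob[16] = { [5] = 0xff };  /* marker byte still erased */
    uint8_t bad_oob[16]  = { [5] = 0x00 };  /* marker byte programmed */
    uint8_t ff = 0xff;
    struct bbt_descr_model bd = { .offs = 5, .len = 1, .pattern = &ff };

    printf("good block: %d\n", check_short_pattern_model(good_oob, &bd));
    printf("bad block:  %d\n", check_short_pattern_model(bad_oob, &bd));
    return 0;
}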
erase.c

@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: erase.c,v 1.76 2005/05/03 15:11:40 dedekind Exp $
+ * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $
  *
  */
@@ -300,100 +300,86 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase
     jeb->last_node = NULL;
 }

-static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
 {
-    struct jffs2_raw_node_ref *marker_ref = NULL;
-    unsigned char *ebuf;
+    void *ebuf;
+    uint32_t ofs;
     size_t retlen;
-    int ret;
-    uint32_t bad_offset;
-
-    if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0)) {
-        marker_ref = jffs2_alloc_raw_node_ref();
-        if (!marker_ref) {
-            printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
-            /* Stick it back on the list from whence it came and come back later */
-            jffs2_erase_pending_trigger(c);
-            spin_lock(&c->erase_completion_lock);
-            list_add(&jeb->list, &c->erase_complete_list);
-            spin_unlock(&c->erase_completion_lock);
-            return;
-        }
-    }
+    int ret = -EIO;

     ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
     if (!ebuf) {
-        printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
-    } else {
-        uint32_t ofs = jeb->offset;
+        printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
+        return -EAGAIN;
+    }

     D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));

-    while(ofs < jeb->offset + c->sector_size) {
+    for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
         uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
         int i;

-        bad_offset = ofs;
+        *bad_offset = ofs;

-        ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
+        ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
         if (ret) {
             printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
-            goto bad;
+            goto fail;
         }
         if (retlen != readlen) {
             printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
-            goto bad;
+            goto fail;
         }
         for (i=0; i<readlen; i += sizeof(unsigned long)) {
             /* It's OK. We know it's properly aligned */
-            unsigned long datum = *(unsigned long *)(&ebuf[i]);
-            if (datum + 1) {
-                bad_offset += i;
-                printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
-            bad:
-                if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0))
-                    jffs2_free_raw_node_ref(marker_ref);
-                kfree(ebuf);
-            bad2:
-                spin_lock(&c->erase_completion_lock);
-                /* Stick it on a list (any list) so
-                   erase_failed can take it right off
-                   again. Silly, but shouldn't happen
-                   often. */
-                list_add(&jeb->list, &c->erasing_list);
-                spin_unlock(&c->erase_completion_lock);
-
-                jffs2_erase_failed(c, jeb, bad_offset);
-                return;
+            unsigned long *datum = ebuf + i;
+            if (*datum + 1) {
+                *bad_offset += i;
+                printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+                goto fail;
             }
         }
         ofs += readlen;
         cond_resched();
     }
+    ret = 0;
+fail:
     kfree(ebuf);
-    }
-
-    bad_offset = jeb->offset;
+    return ret;
+}
+
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+    struct jffs2_raw_node_ref *marker_ref = NULL;
+    size_t retlen;
+    int ret;
+    uint32_t bad_offset;
+
+    switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
+    case -EAGAIN:   goto refile;
+    case -EIO:      goto filebad;
+    }

     /* Write the erase complete marker */
     D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
-    if (jffs2_cleanmarker_oob(c)) {
+    bad_offset = jeb->offset;
+
+    /* Cleanmarker in oob area or no cleanmarker at all ? */
+    if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {
+
+        if (jffs2_cleanmarker_oob(c)) {
             if (jffs2_write_nand_cleanmarker(c, jeb))
-            goto bad2;
+                goto filebad;
+        }

         jeb->first_node = jeb->last_node = NULL;
         jeb->free_size = c->sector_size;
         jeb->used_size = 0;
         jeb->dirty_size = 0;
         jeb->wasted_size = 0;
-    } else if (c->cleanmarker_size == 0) {
-        jeb->first_node = jeb->last_node = NULL;
-        jeb->free_size = c->sector_size;
-        jeb->used_size = 0;
-        jeb->dirty_size = 0;
-        jeb->wasted_size = 0;
     } else {
         struct kvec vecs[1];
         struct jffs2_unknown_node marker = {
             .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
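A note on the blank check carried over into jffs2_block_check_erase(): erased flash reads back as all 0xFF bytes, so a fully erased word is ~0UL and adding 1 wraps it to 0; any other value leaves *datum + 1 non-zero, and the offending offset is reported as bad. A standalone userspace illustration of that test (not part of the patch):

#include <stdio.h>

int main(void)
{
    unsigned long clean = ~0UL;             /* what a properly erased word reads as */
    unsigned long dirty = 0xffffff7fUL;     /* one bit still programmed to 0 */

    /* Unsigned overflow is well defined in C: ~0UL + 1 == 0, so the test
     * is false only for an all-ones word. */
    printf("clean word: %s\n", (clean + 1) ? "not blank" : "blank");
    printf("dirty word: %s\n", (dirty + 1) ? "not blank" : "blank");
    return 0;
}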
@@ -401,21 +387,28 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
             .totlen = cpu_to_je32(c->cleanmarker_size)
         };

+        marker_ref = jffs2_alloc_raw_node_ref();
+        if (!marker_ref) {
+            printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n");
+            goto refile;
+        }
+
         marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));

         vecs[0].iov_base = (unsigned char *) &marker;
         vecs[0].iov_len = sizeof(marker);
         ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);

-        if (ret) {
+        if (ret || retlen != sizeof(marker)) {
+            if (ret)
                 printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
                        jeb->offset, ret);
-            goto bad2;
-        }
-        if (retlen != sizeof(marker)) {
+            else
                 printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
                        jeb->offset, sizeof(marker), retlen);
-            goto bad2;
+
+            jffs2_free_raw_node_ref(marker_ref);
+            goto filebad;
         }

         marker_ref->next_in_ino = NULL;
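For context on the marker written by this hunk: the cleanmarker is just the common JFFS2 node header, twelve bytes whose CRC covers the first eight (hence the sizeof(struct jffs2_unknown_node)-4 in the crc32() call). A simplified model of that layout, with plain fixed-width types standing in for the kernel's endianness-wrapped jint16_t/jint32_t:

#include <stdint.h>

/* Model of struct jffs2_unknown_node as emitted by the kvec write above
 * (field names match; types are simplified for illustration). */
struct jffs2_node_header_model {
    uint16_t magic;     /* JFFS2_MAGIC_BITMASK, as in the initializer shown */
    uint16_t nodetype;  /* node type (cleanmarker here; initializer not shown in these hunks) */
    uint32_t totlen;    /* c->cleanmarker_size in this hunk */
    uint32_t hdr_crc;   /* crc32 over the preceding 8 bytes */
};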
@@ -444,5 +437,22 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
     c->nr_free_blocks++;
     spin_unlock(&c->erase_completion_lock);
     wake_up(&c->erase_wait);
+    return;
+
+filebad:
+    spin_lock(&c->erase_completion_lock);
+    /* Stick it on a list (any list) so erase_failed can take it
+       right off again. Silly, but shouldn't happen often. */
+    list_add(&jeb->list, &c->erasing_list);
+    spin_unlock(&c->erase_completion_lock);
+    jffs2_erase_failed(c, jeb, bad_offset);
+    return;
+
+refile:
+    /* Stick it back on the list from whence it came and come back later */
+    jffs2_erase_pending_trigger(c);
+    spin_lock(&c->erase_completion_lock);
+    list_add(&jeb->list, &c->erase_complete_list);
+    spin_unlock(&c->erase_completion_lock);
+    return;
 }