Commit eb08d8ff authored by Linus Torvalds

Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6: (52 commits)
  UBIFS: switch to dynamic printks
  UBIFS: fix kernel-doc comments
  UBIFS: fix extremely rare mount failure
  UBIFS: simplify LEB recovery function further
  UBIFS: always cleanup the recovered LEB
  UBIFS: clean up LEB recovery function
  UBIFS: fix-up free space on mount if flag is set
  UBIFS: add the fixup function
  UBIFS: add a superblock flag for free space fix-up
  UBIFS: share the next_log_lnum helper
  UBIFS: expect corruption only in last journal head LEBs
  UBIFS: synchronize write-buffer before switching to the next bud
  UBIFS: remove BUG statement
  UBIFS: change bud replay function conventions
  UBIFS: substitute the replay tree with a replay list
  UBIFS: simplify replay
  UBIFS: store free and dirty space in the bud replay entry
  UBIFS: remove unnecessary stack variable
  UBIFS: double check that buds are replied in order
  UBIFS: make 2 functions static
  ...
parents 9f22aae0 56e46742
...@@ -115,28 +115,8 @@ ubi.mtd=0 root=ubi0:rootfs rootfstype=ubifs ...@@ -115,28 +115,8 @@ ubi.mtd=0 root=ubi0:rootfs rootfstype=ubifs
Module Parameters for Debugging Module Parameters for Debugging
=============================== ===============================
When UBIFS has been compiled with debugging enabled, there are 3 module When UBIFS has been compiled with debugging enabled, there are 2 module
parameters that are available to control aspects of testing and debugging. parameters that are available to control aspects of testing and debugging.
The parameters are unsigned integers where each bit controls an option.
The parameters are:
debug_msgs Selects which debug messages to display, as follows:
Message Type Flag value
General messages 1
Journal messages 2
Mount messages 4
Commit messages 8
LEB search messages 16
Budgeting messages 32
Garbage collection messages 64
Tree Node Cache (TNC) messages 128
LEB properties (lprops) messages 256
Input/output messages 512
Log messages 1024
Scan messages 2048
Recovery messages 4096
debug_chks Selects extra checks that UBIFS can do while running: debug_chks Selects extra checks that UBIFS can do while running:
...@@ -154,11 +134,9 @@ debug_tsts Selects a mode of testing, as follows: ...@@ -154,11 +134,9 @@ debug_tsts Selects a mode of testing, as follows:
Test mode Flag value Test mode Flag value
Force in-the-gaps method 2
Failure mode for recovery testing 4 Failure mode for recovery testing 4
For example, set debug_msgs to 5 to display General messages and Mount For example, set debug_chks to 3 to enable general and TNC checks.
messages.
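As an illustration only, a minimal sketch of how such a bit-mask module parameter is typically declared and tested in driver code follows. The flag values match the example above (general checks = 1, TNC checks = 2), but the macro names and the helper are assumptions made up for this sketch, not taken from the UBIFS sources:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Illustrative flag values matching the documentation example above. */
    #define EXAMPLE_CHK_GEN	0x1	/* general extra checks */
    #define EXAMPLE_CHK_TNC	0x2	/* Tree Node Cache (TNC) checks */

    static unsigned int debug_chks;
    module_param(debug_chks, uint, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(debug_chks, "Select extra self-checks (bit-mask)");

    /* Hypothetical helper: non-zero if the given check is enabled. */
    static inline int example_chk_enabled(unsigned int flag)
    {
    	return debug_chks & flag;	/* debug_chks=3 enables GEN and TNC */
    }

Loading the module with debug_chks=3 sets both bits, so code guarded by example_chk_enabled(EXAMPLE_CHK_TNC) would run its extra TNC checks.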
References References
......
...@@ -106,7 +106,7 @@ static long long get_liability(struct ubifs_info *c) ...@@ -106,7 +106,7 @@ static long long get_liability(struct ubifs_info *c)
long long liab; long long liab;
spin_lock(&c->space_lock); spin_lock(&c->space_lock);
liab = c->budg_idx_growth + c->budg_data_growth + c->budg_dd_growth; liab = c->bi.idx_growth + c->bi.data_growth + c->bi.dd_growth;
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
return liab; return liab;
} }
...@@ -180,7 +180,7 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) ...@@ -180,7 +180,7 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c)
int idx_lebs; int idx_lebs;
long long idx_size; long long idx_size;
idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx; idx_size = c->bi.old_idx_sz + c->bi.idx_growth + c->bi.uncommitted_idx;
/* And make sure we have thrice the index size of space reserved */ /* And make sure we have thrice the index size of space reserved */
idx_size += idx_size << 1; idx_size += idx_size << 1;
/* /*
...@@ -292,13 +292,13 @@ static int can_use_rp(struct ubifs_info *c) ...@@ -292,13 +292,13 @@ static int can_use_rp(struct ubifs_info *c)
* budgeted index space to the size of the current index, multiplies this by 3, * budgeted index space to the size of the current index, multiplies this by 3,
* and makes sure this does not exceed the amount of free LEBs. * and makes sure this does not exceed the amount of free LEBs.
* *
* Notes about @c->min_idx_lebs and @c->lst.idx_lebs variables: * Notes about @c->bi.min_idx_lebs and @c->lst.idx_lebs variables:
* o @c->lst.idx_lebs is the number of LEBs the index currently uses. It might * o @c->lst.idx_lebs is the number of LEBs the index currently uses. It might
* be large, because UBIFS does not do any index consolidation as long as * be large, because UBIFS does not do any index consolidation as long as
* there is free space. IOW, the index may take a lot of LEBs, but the LEBs * there is free space. IOW, the index may take a lot of LEBs, but the LEBs
* will contain a lot of dirt. * will contain a lot of dirt.
* o @c->min_idx_lebs is the number of LEBS the index presumably takes. IOW, * o @c->bi.min_idx_lebs is the number of LEBS the index presumably takes. IOW,
* the index may be consolidated to take up to @c->min_idx_lebs LEBs. * the index may be consolidated to take up to @c->bi.min_idx_lebs LEBs.
* *
* This function returns zero in case of success, and %-ENOSPC in case of * This function returns zero in case of success, and %-ENOSPC in case of
* failure. * failure.
...@@ -343,13 +343,13 @@ static int do_budget_space(struct ubifs_info *c) ...@@ -343,13 +343,13 @@ static int do_budget_space(struct ubifs_info *c)
c->lst.taken_empty_lebs; c->lst.taken_empty_lebs;
if (unlikely(rsvd_idx_lebs > lebs)) { if (unlikely(rsvd_idx_lebs > lebs)) {
dbg_budg("out of indexing space: min_idx_lebs %d (old %d), " dbg_budg("out of indexing space: min_idx_lebs %d (old %d), "
"rsvd_idx_lebs %d", min_idx_lebs, c->min_idx_lebs, "rsvd_idx_lebs %d", min_idx_lebs, c->bi.min_idx_lebs,
rsvd_idx_lebs); rsvd_idx_lebs);
return -ENOSPC; return -ENOSPC;
} }
available = ubifs_calc_available(c, min_idx_lebs); available = ubifs_calc_available(c, min_idx_lebs);
outstanding = c->budg_data_growth + c->budg_dd_growth; outstanding = c->bi.data_growth + c->bi.dd_growth;
if (unlikely(available < outstanding)) { if (unlikely(available < outstanding)) {
dbg_budg("out of data space: available %lld, outstanding %lld", dbg_budg("out of data space: available %lld, outstanding %lld",
...@@ -360,7 +360,7 @@ static int do_budget_space(struct ubifs_info *c) ...@@ -360,7 +360,7 @@ static int do_budget_space(struct ubifs_info *c)
if (available - outstanding <= c->rp_size && !can_use_rp(c)) if (available - outstanding <= c->rp_size && !can_use_rp(c))
return -ENOSPC; return -ENOSPC;
c->min_idx_lebs = min_idx_lebs; c->bi.min_idx_lebs = min_idx_lebs;
return 0; return 0;
} }
...@@ -393,11 +393,11 @@ static int calc_data_growth(const struct ubifs_info *c, ...@@ -393,11 +393,11 @@ static int calc_data_growth(const struct ubifs_info *c,
{ {
int data_growth; int data_growth;
data_growth = req->new_ino ? c->inode_budget : 0; data_growth = req->new_ino ? c->bi.inode_budget : 0;
if (req->new_page) if (req->new_page)
data_growth += c->page_budget; data_growth += c->bi.page_budget;
if (req->new_dent) if (req->new_dent)
data_growth += c->dent_budget; data_growth += c->bi.dent_budget;
data_growth += req->new_ino_d; data_growth += req->new_ino_d;
return data_growth; return data_growth;
} }
...@@ -413,12 +413,12 @@ static int calc_dd_growth(const struct ubifs_info *c, ...@@ -413,12 +413,12 @@ static int calc_dd_growth(const struct ubifs_info *c,
{ {
int dd_growth; int dd_growth;
dd_growth = req->dirtied_page ? c->page_budget : 0; dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
if (req->dirtied_ino) if (req->dirtied_ino)
dd_growth += c->inode_budget << (req->dirtied_ino - 1); dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
if (req->mod_dent) if (req->mod_dent)
dd_growth += c->dent_budget; dd_growth += c->bi.dent_budget;
dd_growth += req->dirtied_ino_d; dd_growth += req->dirtied_ino_d;
return dd_growth; return dd_growth;
} }
...@@ -460,19 +460,19 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) ...@@ -460,19 +460,19 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
again: again:
spin_lock(&c->space_lock); spin_lock(&c->space_lock);
ubifs_assert(c->budg_idx_growth >= 0); ubifs_assert(c->bi.idx_growth >= 0);
ubifs_assert(c->budg_data_growth >= 0); ubifs_assert(c->bi.data_growth >= 0);
ubifs_assert(c->budg_dd_growth >= 0); ubifs_assert(c->bi.dd_growth >= 0);
if (unlikely(c->nospace) && (c->nospace_rp || !can_use_rp(c))) { if (unlikely(c->bi.nospace) && (c->bi.nospace_rp || !can_use_rp(c))) {
dbg_budg("no space"); dbg_budg("no space");
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
return -ENOSPC; return -ENOSPC;
} }
c->budg_idx_growth += idx_growth; c->bi.idx_growth += idx_growth;
c->budg_data_growth += data_growth; c->bi.data_growth += data_growth;
c->budg_dd_growth += dd_growth; c->bi.dd_growth += dd_growth;
err = do_budget_space(c); err = do_budget_space(c);
if (likely(!err)) { if (likely(!err)) {
...@@ -484,9 +484,9 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) ...@@ -484,9 +484,9 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
} }
/* Restore the old values */ /* Restore the old values */
c->budg_idx_growth -= idx_growth; c->bi.idx_growth -= idx_growth;
c->budg_data_growth -= data_growth; c->bi.data_growth -= data_growth;
c->budg_dd_growth -= dd_growth; c->bi.dd_growth -= dd_growth;
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
if (req->fast) { if (req->fast) {
...@@ -506,9 +506,9 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) ...@@ -506,9 +506,9 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
goto again; goto again;
} }
dbg_budg("FS is full, -ENOSPC"); dbg_budg("FS is full, -ENOSPC");
c->nospace = 1; c->bi.nospace = 1;
if (can_use_rp(c) || c->rp_size == 0) if (can_use_rp(c) || c->rp_size == 0)
c->nospace_rp = 1; c->bi.nospace_rp = 1;
smp_wmb(); smp_wmb();
} else } else
ubifs_err("cannot budget space, error %d", err); ubifs_err("cannot budget space, error %d", err);
...@@ -523,8 +523,8 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) ...@@ -523,8 +523,8 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
* This function releases the space budgeted by 'ubifs_budget_space()'. Note, * This function releases the space budgeted by 'ubifs_budget_space()'. Note,
* since the index changes (which were budgeted for in @req->idx_growth) will * since the index changes (which were budgeted for in @req->idx_growth) will
* only be written to the media on commit, this function moves the index budget * only be written to the media on commit, this function moves the index budget
* from @c->budg_idx_growth to @c->budg_uncommitted_idx. The latter will be * from @c->bi.idx_growth to @c->bi.uncommitted_idx. The latter will be zeroed
* zeroed by the commit operation. * by the commit operation.
*/ */
void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req) void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
{ {
...@@ -553,23 +553,23 @@ void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req) ...@@ -553,23 +553,23 @@ void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
if (!req->data_growth && !req->dd_growth) if (!req->data_growth && !req->dd_growth)
return; return;
c->nospace = c->nospace_rp = 0; c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb(); smp_wmb();
spin_lock(&c->space_lock); spin_lock(&c->space_lock);
c->budg_idx_growth -= req->idx_growth; c->bi.idx_growth -= req->idx_growth;
c->budg_uncommitted_idx += req->idx_growth; c->bi.uncommitted_idx += req->idx_growth;
c->budg_data_growth -= req->data_growth; c->bi.data_growth -= req->data_growth;
c->budg_dd_growth -= req->dd_growth; c->bi.dd_growth -= req->dd_growth;
c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
ubifs_assert(c->budg_idx_growth >= 0); ubifs_assert(c->bi.idx_growth >= 0);
ubifs_assert(c->budg_data_growth >= 0); ubifs_assert(c->bi.data_growth >= 0);
ubifs_assert(c->budg_dd_growth >= 0); ubifs_assert(c->bi.dd_growth >= 0);
ubifs_assert(c->min_idx_lebs < c->main_lebs); ubifs_assert(c->bi.min_idx_lebs < c->main_lebs);
ubifs_assert(!(c->budg_idx_growth & 7)); ubifs_assert(!(c->bi.idx_growth & 7));
ubifs_assert(!(c->budg_data_growth & 7)); ubifs_assert(!(c->bi.data_growth & 7));
ubifs_assert(!(c->budg_dd_growth & 7)); ubifs_assert(!(c->bi.dd_growth & 7));
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
} }
...@@ -586,13 +586,13 @@ void ubifs_convert_page_budget(struct ubifs_info *c) ...@@ -586,13 +586,13 @@ void ubifs_convert_page_budget(struct ubifs_info *c)
{ {
spin_lock(&c->space_lock); spin_lock(&c->space_lock);
/* Release the index growth reservation */ /* Release the index growth reservation */
c->budg_idx_growth -= c->max_idx_node_sz << UBIFS_BLOCKS_PER_PAGE_SHIFT; c->bi.idx_growth -= c->max_idx_node_sz << UBIFS_BLOCKS_PER_PAGE_SHIFT;
/* Release the data growth reservation */ /* Release the data growth reservation */
c->budg_data_growth -= c->page_budget; c->bi.data_growth -= c->bi.page_budget;
/* Increase the dirty data growth reservation instead */ /* Increase the dirty data growth reservation instead */
c->budg_dd_growth += c->page_budget; c->bi.dd_growth += c->bi.page_budget;
/* And re-calculate the indexing space reservation */ /* And re-calculate the indexing space reservation */
c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
} }
...@@ -612,7 +612,7 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, ...@@ -612,7 +612,7 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c,
memset(&req, 0, sizeof(struct ubifs_budget_req)); memset(&req, 0, sizeof(struct ubifs_budget_req));
/* The "no space" flags will be cleared because dd_growth is > 0 */ /* The "no space" flags will be cleared because dd_growth is > 0 */
req.dd_growth = c->inode_budget + ALIGN(ui->data_len, 8); req.dd_growth = c->bi.inode_budget + ALIGN(ui->data_len, 8);
ubifs_release_budget(c, &req); ubifs_release_budget(c, &req);
} }
...@@ -682,9 +682,9 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c) ...@@ -682,9 +682,9 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c)
int rsvd_idx_lebs, lebs; int rsvd_idx_lebs, lebs;
long long available, outstanding, free; long long available, outstanding, free;
ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
outstanding = c->budg_data_growth + c->budg_dd_growth; outstanding = c->bi.data_growth + c->bi.dd_growth;
available = ubifs_calc_available(c, c->min_idx_lebs); available = ubifs_calc_available(c, c->bi.min_idx_lebs);
/* /*
* When reporting free space to user-space, UBIFS guarantees that it is * When reporting free space to user-space, UBIFS guarantees that it is
...@@ -697,8 +697,8 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c) ...@@ -697,8 +697,8 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c)
* Note, the calculations below are similar to what we have in * Note, the calculations below are similar to what we have in
* 'do_budget_space()', so refer there for comments. * 'do_budget_space()', so refer there for comments.
*/ */
if (c->min_idx_lebs > c->lst.idx_lebs) if (c->bi.min_idx_lebs > c->lst.idx_lebs)
rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
else else
rsvd_idx_lebs = 0; rsvd_idx_lebs = 0;
lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
......
...@@ -182,7 +182,7 @@ static int do_commit(struct ubifs_info *c) ...@@ -182,7 +182,7 @@ static int do_commit(struct ubifs_info *c)
c->mst_node->root_len = cpu_to_le32(zroot.len); c->mst_node->root_len = cpu_to_le32(zroot.len);
c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum); c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum);
c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs); c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs);
c->mst_node->index_size = cpu_to_le64(c->old_idx_sz); c->mst_node->index_size = cpu_to_le64(c->bi.old_idx_sz);
c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum); c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum);
c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs); c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs);
c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum); c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum);
......
...@@ -603,7 +603,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry) ...@@ -603,7 +603,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
ubifs_release_budget(c, &req); ubifs_release_budget(c, &req);
else { else {
/* We've deleted something - clean the "no space" flags */ /* We've deleted something - clean the "no space" flags */
c->nospace = c->nospace_rp = 0; c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb(); smp_wmb();
} }
return 0; return 0;
...@@ -693,7 +693,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry) ...@@ -693,7 +693,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
ubifs_release_budget(c, &req); ubifs_release_budget(c, &req);
else { else {
/* We've deleted something - clean the "no space" flags */ /* We've deleted something - clean the "no space" flags */
c->nospace = c->nospace_rp = 0; c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb(); smp_wmb();
} }
return 0; return 0;
......
...@@ -212,7 +212,7 @@ static void release_new_page_budget(struct ubifs_info *c) ...@@ -212,7 +212,7 @@ static void release_new_page_budget(struct ubifs_info *c)
*/ */
static void release_existing_page_budget(struct ubifs_info *c) static void release_existing_page_budget(struct ubifs_info *c)
{ {
struct ubifs_budget_req req = { .dd_growth = c->page_budget}; struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};
ubifs_release_budget(c, &req); ubifs_release_budget(c, &req);
} }
...@@ -971,11 +971,11 @@ static int do_writepage(struct page *page, int len) ...@@ -971,11 +971,11 @@ static int do_writepage(struct page *page, int len)
* the page locked, and it locks @ui_mutex. However, write-back does take inode * the page locked, and it locks @ui_mutex. However, write-back does take inode
* @i_mutex, which means other VFS operations may be run on this inode at the * @i_mutex, which means other VFS operations may be run on this inode at the
* same time. And the problematic one is truncation to smaller size, from where * same time. And the problematic one is truncation to smaller size, from where
* we have to call 'truncate_setsize()', which first changes @inode->i_size, then * we have to call 'truncate_setsize()', which first changes @inode->i_size,
* drops the truncated pages. And while dropping the pages, it takes the page * then drops the truncated pages. And while dropping the pages, it takes the
* lock. This means that 'do_truncation()' cannot call 'truncate_setsize()' with * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()'
* @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. This * with @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'.
* means that @inode->i_size is changed while @ui_mutex is unlocked. * This means that @inode->i_size is changed while @ui_mutex is unlocked.
* *
* XXX(truncate): with the new truncate sequence this is not true anymore, * XXX(truncate): with the new truncate sequence this is not true anymore,
* and the calls to truncate_setsize can be move around freely. They should * and the calls to truncate_setsize can be move around freely. They should
...@@ -1189,7 +1189,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, ...@@ -1189,7 +1189,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
if (budgeted) if (budgeted)
ubifs_release_budget(c, &req); ubifs_release_budget(c, &req);
else { else {
c->nospace = c->nospace_rp = 0; c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb(); smp_wmb();
} }
return err; return err;
...@@ -1312,7 +1312,11 @@ int ubifs_fsync(struct file *file, int datasync) ...@@ -1312,7 +1312,11 @@ int ubifs_fsync(struct file *file, int datasync)
dbg_gen("syncing inode %lu", inode->i_ino); dbg_gen("syncing inode %lu", inode->i_ino);
if (inode->i_sb->s_flags & MS_RDONLY) if (c->ro_mount)
/*
* For some really strange reasons VFS does not filter out
* 'fsync()' for R/O mounted file-systems as per 2.6.39.
*/
return 0; return 0;
/* /*
...@@ -1432,10 +1436,11 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) ...@@ -1432,10 +1436,11 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
} }
/* /*
* mmap()d file has taken write protection fault and is being made * mmap()d file has taken write protection fault and is being made writable.
* writable. UBIFS must ensure page is budgeted for. * UBIFS must ensure page is budgeted for.
*/ */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{ {
struct page *page = vmf->page; struct page *page = vmf->page;
struct inode *inode = vma->vm_file->f_path.dentry->d_inode; struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
...@@ -1536,7 +1541,6 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -1536,7 +1541,6 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{ {
int err; int err;
/* 'generic_file_mmap()' takes care of NOMMU case */
err = generic_file_mmap(file, vma); err = generic_file_mmap(file, vma);
if (err) if (err)
return err; return err;
......
...@@ -252,8 +252,8 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, ...@@ -252,8 +252,8 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
* But if the index takes fewer LEBs than it is reserved for it, * But if the index takes fewer LEBs than it is reserved for it,
* this function must avoid picking those reserved LEBs. * this function must avoid picking those reserved LEBs.
*/ */
if (c->min_idx_lebs >= c->lst.idx_lebs) { if (c->bi.min_idx_lebs >= c->lst.idx_lebs) {
rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
exclude_index = 1; exclude_index = 1;
} }
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
...@@ -276,7 +276,7 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, ...@@ -276,7 +276,7 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
pick_free = 0; pick_free = 0;
} else { } else {
spin_lock(&c->space_lock); spin_lock(&c->space_lock);
exclude_index = (c->min_idx_lebs >= c->lst.idx_lebs); exclude_index = (c->bi.min_idx_lebs >= c->lst.idx_lebs);
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
} }
...@@ -501,8 +501,8 @@ int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs, ...@@ -501,8 +501,8 @@ int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs,
/* Check if there are enough empty LEBs for commit */ /* Check if there are enough empty LEBs for commit */
spin_lock(&c->space_lock); spin_lock(&c->space_lock);
if (c->min_idx_lebs > c->lst.idx_lebs) if (c->bi.min_idx_lebs > c->lst.idx_lebs)
rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
else else
rsvd_idx_lebs = 0; rsvd_idx_lebs = 0;
lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
......
...@@ -100,6 +100,10 @@ static int switch_gc_head(struct ubifs_info *c) ...@@ -100,6 +100,10 @@ static int switch_gc_head(struct ubifs_info *c)
if (err) if (err)
return err; return err;
err = ubifs_wbuf_sync_nolock(wbuf);
if (err)
return err;
err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0); err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
if (err) if (err)
return err; return err;
...@@ -118,7 +122,7 @@ static int switch_gc_head(struct ubifs_info *c) ...@@ -118,7 +122,7 @@ static int switch_gc_head(struct ubifs_info *c)
* This function compares data nodes @a and @b. Returns %1 if @a has greater * This function compares data nodes @a and @b. Returns %1 if @a has greater
* inode or block number, and %-1 otherwise. * inode or block number, and %-1 otherwise.
*/ */
int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
{ {
ino_t inuma, inumb; ino_t inuma, inumb;
struct ubifs_info *c = priv; struct ubifs_info *c = priv;
...@@ -161,7 +165,8 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) ...@@ -161,7 +165,8 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
* first and sorted by length in descending order. Directory entry nodes go * first and sorted by length in descending order. Directory entry nodes go
* after inode nodes and are sorted in ascending hash valuer order. * after inode nodes and are sorted in ascending hash valuer order.
*/ */
int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) static int nondata_nodes_cmp(void *priv, struct list_head *a,
struct list_head *b)
{ {
ino_t inuma, inumb; ino_t inuma, inumb;
struct ubifs_info *c = priv; struct ubifs_info *c = priv;
...@@ -473,6 +478,37 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) ...@@ -473,6 +478,37 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
ubifs_assert(c->gc_lnum != lnum); ubifs_assert(c->gc_lnum != lnum);
ubifs_assert(wbuf->lnum != lnum); ubifs_assert(wbuf->lnum != lnum);
if (lp->free + lp->dirty == c->leb_size) {
/* Special case - a free LEB */
dbg_gc("LEB %d is free, return it", lp->lnum);
ubifs_assert(!(lp->flags & LPROPS_INDEX));
if (lp->free != c->leb_size) {
/*
* Write buffers must be sync'd before unmapping
* freeable LEBs, because one of them may contain data
* which obsoletes something in 'lp->pnum'.
*/
err = gc_sync_wbufs(c);
if (err)
return err;
err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
0, 0, 0, 0);
if (err)
return err;
}
err = ubifs_leb_unmap(c, lp->lnum);
if (err)
return err;
if (c->gc_lnum == -1) {
c->gc_lnum = lnum;
return LEB_RETAINED;
}
return LEB_FREED;
}
/* /*
* We scan the entire LEB even though we only really need to scan up to * We scan the entire LEB even though we only really need to scan up to
* (c->leb_size - lp->free). * (c->leb_size - lp->free).
...@@ -682,37 +718,6 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway) ...@@ -682,37 +718,6 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
"(min. space %d)", lp.lnum, lp.free, lp.dirty, "(min. space %d)", lp.lnum, lp.free, lp.dirty,
lp.free + lp.dirty, min_space); lp.free + lp.dirty, min_space);
if (lp.free + lp.dirty == c->leb_size) {
/* An empty LEB was returned */
dbg_gc("LEB %d is free, return it", lp.lnum);
/*
* ubifs_find_dirty_leb() doesn't return freeable index
* LEBs.
*/
ubifs_assert(!(lp.flags & LPROPS_INDEX));
if (lp.free != c->leb_size) {
/*
* Write buffers must be sync'd before
* unmapping freeable LEBs, because one of them
* may contain data which obsoletes something
* in 'lp.pnum'.
*/
ret = gc_sync_wbufs(c);
if (ret)
goto out;
ret = ubifs_change_one_lp(c, lp.lnum,
c->leb_size, 0, 0, 0,
0);
if (ret)
goto out;
}
ret = ubifs_leb_unmap(c, lp.lnum);
if (ret)
goto out;
ret = lp.lnum;
break;
}
space_before = c->leb_size - wbuf->offs - wbuf->used; space_before = c->leb_size - wbuf->offs - wbuf->used;
if (wbuf->lnum == -1) if (wbuf->lnum == -1)
space_before = 0; space_before = 0;
......
...@@ -393,7 +393,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) ...@@ -393,7 +393,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
ubifs_assert(wbuf->size % c->min_io_size == 0); ubifs_assert(wbuf->size % c->min_io_size == 0);
ubifs_assert(!c->ro_media && !c->ro_mount); ubifs_assert(!c->ro_media && !c->ro_mount);
if (c->leb_size - wbuf->offs >= c->max_write_size) if (c->leb_size - wbuf->offs >= c->max_write_size)
ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size )); ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
if (c->ro_error) if (c->ro_error)
return -EROFS; return -EROFS;
...@@ -452,8 +452,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) ...@@ -452,8 +452,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
* @dtype: data type * @dtype: data type
* *
* This function targets the write-buffer to logical eraseblock @lnum:@offs. * This function targets the write-buffer to logical eraseblock @lnum:@offs.
* The write-buffer is synchronized if it is not empty. Returns zero in case of * The write-buffer has to be empty. Returns zero in case of success and a
* success and a negative error code in case of failure. * negative error code in case of failure.
*/ */
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
int dtype) int dtype)
...@@ -465,13 +465,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, ...@@ -465,13 +465,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
ubifs_assert(offs >= 0 && offs <= c->leb_size); ubifs_assert(offs >= 0 && offs <= c->leb_size);
ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
ubifs_assert(lnum != wbuf->lnum); ubifs_assert(lnum != wbuf->lnum);
ubifs_assert(wbuf->used == 0);
if (wbuf->used > 0) {
int err = ubifs_wbuf_sync_nolock(wbuf);
if (err)
return err;
}
spin_lock(&wbuf->lock); spin_lock(&wbuf->lock);
wbuf->lnum = lnum; wbuf->lnum = lnum;
...@@ -573,7 +567,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c) ...@@ -573,7 +567,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{ {
struct ubifs_info *c = wbuf->c; struct ubifs_info *c = wbuf->c;
int err, written, n, aligned_len = ALIGN(len, 8), offs; int err, written, n, aligned_len = ALIGN(len, 8);
dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len, dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
dbg_ntype(((struct ubifs_ch *)buf)->node_type), dbg_ntype(((struct ubifs_ch *)buf)->node_type),
...@@ -588,7 +582,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ...@@ -588,7 +582,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
ubifs_assert(!c->ro_media && !c->ro_mount); ubifs_assert(!c->ro_media && !c->ro_mount);
if (c->leb_size - wbuf->offs >= c->max_write_size) if (c->leb_size - wbuf->offs >= c->max_write_size)
ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size )); ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
err = -ENOSPC; err = -ENOSPC;
...@@ -636,7 +630,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ...@@ -636,7 +630,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
goto exit; goto exit;
} }
offs = wbuf->offs;
written = 0; written = 0;
if (wbuf->used) { if (wbuf->used) {
...@@ -653,7 +646,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ...@@ -653,7 +646,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
if (err) if (err)
goto out; goto out;
offs += wbuf->size; wbuf->offs += wbuf->size;
len -= wbuf->avail; len -= wbuf->avail;
aligned_len -= wbuf->avail; aligned_len -= wbuf->avail;
written += wbuf->avail; written += wbuf->avail;
...@@ -672,7 +665,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ...@@ -672,7 +665,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
if (err) if (err)
goto out; goto out;
offs += wbuf->size; wbuf->offs += wbuf->size;
len -= wbuf->size; len -= wbuf->size;
aligned_len -= wbuf->size; aligned_len -= wbuf->size;
written += wbuf->size; written += wbuf->size;
...@@ -687,12 +680,13 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ...@@ -687,12 +680,13 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
n = aligned_len >> c->max_write_shift; n = aligned_len >> c->max_write_shift;
if (n) { if (n) {
n <<= c->max_write_shift; n <<= c->max_write_shift;
dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, offs); dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, offs, n, wbuf->offs);
wbuf->dtype); err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written,
wbuf->offs, n, wbuf->dtype);
if (err) if (err)
goto out; goto out;
offs += n; wbuf->offs += n;
aligned_len -= n; aligned_len -= n;
len -= n; len -= n;
written += n; written += n;
...@@ -707,7 +701,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ...@@ -707,7 +701,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
*/ */
memcpy(wbuf->buf, buf + written, len); memcpy(wbuf->buf, buf + written, len);
wbuf->offs = offs;
if (c->leb_size - wbuf->offs >= c->max_write_size) if (c->leb_size - wbuf->offs >= c->max_write_size)
wbuf->size = c->max_write_size; wbuf->size = c->max_write_size;
else else
......
...@@ -141,14 +141,8 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len) ...@@ -141,14 +141,8 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
* LEB with some empty space. * LEB with some empty space.
*/ */
lnum = ubifs_find_free_space(c, len, &offs, squeeze); lnum = ubifs_find_free_space(c, len, &offs, squeeze);
if (lnum >= 0) { if (lnum >= 0)
/* Found an LEB, add it to the journal head */
err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
if (err)
goto out_return;
/* A new bud was successfully allocated and added to the log */
goto out; goto out;
}
err = lnum; err = lnum;
if (err != -ENOSPC) if (err != -ENOSPC)
...@@ -203,12 +197,23 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len) ...@@ -203,12 +197,23 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
return 0; return 0;
} }
err = ubifs_add_bud_to_log(c, jhead, lnum, 0);
if (err)
goto out_return;
offs = 0; offs = 0;
out: out:
/*
* Make sure we synchronize the write-buffer before we add the new bud
* to the log. Otherwise we may have a power cut after the log
* reference node for the last bud (@lnum) is written but before the
* write-buffer data are written to the next-to-last bud
* (@wbuf->lnum). And the effect would be that the recovery would see
* that there is corruption in the next-to-last bud.
*/
err = ubifs_wbuf_sync_nolock(wbuf);
if (err)
goto out_return;
err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
if (err)
goto out_return;
err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype); err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype);
if (err) if (err)
goto out_unlock; goto out_unlock;
...@@ -380,10 +385,8 @@ static int make_reservation(struct ubifs_info *c, int jhead, int len) ...@@ -380,10 +385,8 @@ static int make_reservation(struct ubifs_info *c, int jhead, int len)
if (err == -ENOSPC) { if (err == -ENOSPC) {
/* This are some budgeting problems, print useful information */ /* This are some budgeting problems, print useful information */
down_write(&c->commit_sem); down_write(&c->commit_sem);
spin_lock(&c->space_lock);
dbg_dump_stack(); dbg_dump_stack();
dbg_dump_budg(c); dbg_dump_budg(c, &c->bi);
spin_unlock(&c->space_lock);
dbg_dump_lprops(c); dbg_dump_lprops(c);
cmt_retries = dbg_check_lprops(c); cmt_retries = dbg_check_lprops(c);
up_write(&c->commit_sem); up_write(&c->commit_sem);
......
...@@ -99,20 +99,6 @@ struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum) ...@@ -99,20 +99,6 @@ struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
return NULL; return NULL;
} }
/**
* next_log_lnum - switch to the next log LEB.
* @c: UBIFS file-system description object
* @lnum: current log LEB
*/
static inline int next_log_lnum(const struct ubifs_info *c, int lnum)
{
lnum += 1;
if (lnum > c->log_last)
lnum = UBIFS_LOG_LNUM;
return lnum;
}
/** /**
* empty_log_bytes - calculate amount of empty space in the log. * empty_log_bytes - calculate amount of empty space in the log.
* @c: UBIFS file-system description object * @c: UBIFS file-system description object
...@@ -257,7 +243,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) ...@@ -257,7 +243,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
ref->jhead = cpu_to_le32(jhead); ref->jhead = cpu_to_le32(jhead);
if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
c->lhead_lnum = next_log_lnum(c, c->lhead_lnum); c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
c->lhead_offs = 0; c->lhead_offs = 0;
} }
...@@ -425,7 +411,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) ...@@ -425,7 +411,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
/* Switch to the next log LEB */ /* Switch to the next log LEB */
if (c->lhead_offs) { if (c->lhead_offs) {
c->lhead_lnum = next_log_lnum(c, c->lhead_lnum); c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
c->lhead_offs = 0; c->lhead_offs = 0;
} }
...@@ -446,7 +432,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) ...@@ -446,7 +432,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
c->lhead_offs += len; c->lhead_offs += len;
if (c->lhead_offs == c->leb_size) { if (c->lhead_offs == c->leb_size) {
c->lhead_lnum = next_log_lnum(c, c->lhead_lnum); c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
c->lhead_offs = 0; c->lhead_offs = 0;
} }
...@@ -533,7 +519,7 @@ int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum) ...@@ -533,7 +519,7 @@ int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
} }
mutex_lock(&c->log_mutex); mutex_lock(&c->log_mutex);
for (lnum = old_ltail_lnum; lnum != c->ltail_lnum; for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
lnum = next_log_lnum(c, lnum)) { lnum = ubifs_next_log_lnum(c, lnum)) {
dbg_log("unmap log LEB %d", lnum); dbg_log("unmap log LEB %d", lnum);
err = ubifs_leb_unmap(c, lnum); err = ubifs_leb_unmap(c, lnum);
if (err) if (err)
...@@ -642,7 +628,7 @@ static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs, ...@@ -642,7 +628,7 @@ static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM); err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM);
if (err) if (err)
return err; return err;
*lnum = next_log_lnum(c, *lnum); *lnum = ubifs_next_log_lnum(c, *lnum);
*offs = 0; *offs = 0;
} }
memcpy(buf + *offs, node, len); memcpy(buf + *offs, node, len);
...@@ -712,7 +698,7 @@ int ubifs_consolidate_log(struct ubifs_info *c) ...@@ -712,7 +698,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
ubifs_scan_destroy(sleb); ubifs_scan_destroy(sleb);
if (lnum == c->lhead_lnum) if (lnum == c->lhead_lnum)
break; break;
lnum = next_log_lnum(c, lnum); lnum = ubifs_next_log_lnum(c, lnum);
} }
if (offs) { if (offs) {
int sz = ALIGN(offs, c->min_io_size); int sz = ALIGN(offs, c->min_io_size);
...@@ -732,7 +718,7 @@ int ubifs_consolidate_log(struct ubifs_info *c) ...@@ -732,7 +718,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
/* Unmap remaining LEBs */ /* Unmap remaining LEBs */
lnum = write_lnum; lnum = write_lnum;
do { do {
lnum = next_log_lnum(c, lnum); lnum = ubifs_next_log_lnum(c, lnum);
err = ubifs_leb_unmap(c, lnum); err = ubifs_leb_unmap(c, lnum);
if (err) if (err)
return err; return err;
......
...@@ -1006,22 +1006,12 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, ...@@ -1006,22 +1006,12 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
} }
} }
/**
* struct scan_check_data - data provided to scan callback function.
* @lst: LEB properties statistics
* @err: error code
*/
struct scan_check_data {
struct ubifs_lp_stats lst;
int err;
};
/** /**
* scan_check_cb - scan callback. * scan_check_cb - scan callback.
* @c: the UBIFS file-system description object * @c: the UBIFS file-system description object
* @lp: LEB properties to scan * @lp: LEB properties to scan
* @in_tree: whether the LEB properties are in main memory * @in_tree: whether the LEB properties are in main memory
* @data: information passed to and from the caller of the scan * @lst: lprops statistics to update
* *
* This function returns a code that indicates whether the scan should continue * This function returns a code that indicates whether the scan should continue
* (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree
...@@ -1030,11 +1020,10 @@ struct scan_check_data { ...@@ -1030,11 +1020,10 @@ struct scan_check_data {
*/ */
static int scan_check_cb(struct ubifs_info *c, static int scan_check_cb(struct ubifs_info *c,
const struct ubifs_lprops *lp, int in_tree, const struct ubifs_lprops *lp, int in_tree,
struct scan_check_data *data) struct ubifs_lp_stats *lst)
{ {
struct ubifs_scan_leb *sleb; struct ubifs_scan_leb *sleb;
struct ubifs_scan_node *snod; struct ubifs_scan_node *snod;
struct ubifs_lp_stats *lst = &data->lst;
int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret; int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret;
void *buf = NULL; void *buf = NULL;
...@@ -1044,7 +1033,7 @@ static int scan_check_cb(struct ubifs_info *c, ...@@ -1044,7 +1033,7 @@ static int scan_check_cb(struct ubifs_info *c,
if (cat != (lp->flags & LPROPS_CAT_MASK)) { if (cat != (lp->flags & LPROPS_CAT_MASK)) {
ubifs_err("bad LEB category %d expected %d", ubifs_err("bad LEB category %d expected %d",
(lp->flags & LPROPS_CAT_MASK), cat); (lp->flags & LPROPS_CAT_MASK), cat);
goto out; return -EINVAL;
} }
} }
...@@ -1078,7 +1067,7 @@ static int scan_check_cb(struct ubifs_info *c, ...@@ -1078,7 +1067,7 @@ static int scan_check_cb(struct ubifs_info *c,
} }
if (!found) { if (!found) {
ubifs_err("bad LPT list (category %d)", cat); ubifs_err("bad LPT list (category %d)", cat);
goto out; return -EINVAL;
} }
} }
} }
...@@ -1090,45 +1079,40 @@ static int scan_check_cb(struct ubifs_info *c, ...@@ -1090,45 +1079,40 @@ static int scan_check_cb(struct ubifs_info *c,
if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) || if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) ||
lp != heap->arr[lp->hpos]) { lp != heap->arr[lp->hpos]) {
ubifs_err("bad LPT heap (category %d)", cat); ubifs_err("bad LPT heap (category %d)", cat);
goto out; return -EINVAL;
} }
} }
buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) { if (!buf)
ubifs_err("cannot allocate memory to scan LEB %d", lnum); return -ENOMEM;
goto out;
/*
* After an unclean unmount, empty and freeable LEBs
* may contain garbage - do not scan them.
*/
if (lp->free == c->leb_size) {
lst->empty_lebs += 1;
lst->total_free += c->leb_size;
lst->total_dark += ubifs_calc_dark(c, c->leb_size);
return LPT_SCAN_CONTINUE;
}
if (lp->free + lp->dirty == c->leb_size &&
!(lp->flags & LPROPS_INDEX)) {
lst->total_free += lp->free;
lst->total_dirty += lp->dirty;
lst->total_dark += ubifs_calc_dark(c, c->leb_size);
return LPT_SCAN_CONTINUE;
} }
sleb = ubifs_scan(c, lnum, 0, buf, 0); sleb = ubifs_scan(c, lnum, 0, buf, 0);
if (IS_ERR(sleb)) { if (IS_ERR(sleb)) {
/* ret = PTR_ERR(sleb);
* After an unclean unmount, empty and freeable LEBs if (ret == -EUCLEAN) {
* may contain garbage. dbg_dump_lprops(c);
*/ dbg_dump_budg(c, &c->bi);
if (lp->free == c->leb_size) {
ubifs_err("scan errors were in empty LEB "
"- continuing checking");
lst->empty_lebs += 1;
lst->total_free += c->leb_size;
lst->total_dark += ubifs_calc_dark(c, c->leb_size);
ret = LPT_SCAN_CONTINUE;
goto exit;
}
if (lp->free + lp->dirty == c->leb_size &&
!(lp->flags & LPROPS_INDEX)) {
ubifs_err("scan errors were in freeable LEB "
"- continuing checking");
lst->total_free += lp->free;
lst->total_dirty += lp->dirty;
lst->total_dark += ubifs_calc_dark(c, c->leb_size);
ret = LPT_SCAN_CONTINUE;
goto exit;
} }
data->err = PTR_ERR(sleb); goto out;
ret = LPT_SCAN_STOP;
goto exit;
} }
is_idx = -1; is_idx = -1;
...@@ -1246,10 +1230,8 @@ static int scan_check_cb(struct ubifs_info *c, ...@@ -1246,10 +1230,8 @@ static int scan_check_cb(struct ubifs_info *c,
} }
ubifs_scan_destroy(sleb); ubifs_scan_destroy(sleb);
ret = LPT_SCAN_CONTINUE;
exit:
vfree(buf); vfree(buf);
return ret; return LPT_SCAN_CONTINUE;
out_print: out_print:
ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, " ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, "
...@@ -1258,10 +1240,10 @@ static int scan_check_cb(struct ubifs_info *c, ...@@ -1258,10 +1240,10 @@ static int scan_check_cb(struct ubifs_info *c,
dbg_dump_leb(c, lnum); dbg_dump_leb(c, lnum);
out_destroy: out_destroy:
ubifs_scan_destroy(sleb); ubifs_scan_destroy(sleb);
ret = -EINVAL;
out: out:
vfree(buf); vfree(buf);
data->err = -EINVAL; return ret;
return LPT_SCAN_STOP;
} }
/** /**
...@@ -1278,8 +1260,7 @@ static int scan_check_cb(struct ubifs_info *c, ...@@ -1278,8 +1260,7 @@ static int scan_check_cb(struct ubifs_info *c,
int dbg_check_lprops(struct ubifs_info *c) int dbg_check_lprops(struct ubifs_info *c)
{ {
int i, err; int i, err;
struct scan_check_data data; struct ubifs_lp_stats lst;
struct ubifs_lp_stats *lst = &data.lst;
if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
return 0; return 0;
...@@ -1294,29 +1275,23 @@ int dbg_check_lprops(struct ubifs_info *c) ...@@ -1294,29 +1275,23 @@ int dbg_check_lprops(struct ubifs_info *c)
return err; return err;
} }
memset(lst, 0, sizeof(struct ubifs_lp_stats)); memset(&lst, 0, sizeof(struct ubifs_lp_stats));
data.err = 0;
err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1, err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1,
(ubifs_lpt_scan_callback)scan_check_cb, (ubifs_lpt_scan_callback)scan_check_cb,
&data); &lst);
if (err && err != -ENOSPC) if (err && err != -ENOSPC)
goto out; goto out;
if (data.err) {
err = data.err;
goto out;
}
if (lst->empty_lebs != c->lst.empty_lebs || if (lst.empty_lebs != c->lst.empty_lebs ||
lst->idx_lebs != c->lst.idx_lebs || lst.idx_lebs != c->lst.idx_lebs ||
lst->total_free != c->lst.total_free || lst.total_free != c->lst.total_free ||
lst->total_dirty != c->lst.total_dirty || lst.total_dirty != c->lst.total_dirty ||
lst->total_used != c->lst.total_used) { lst.total_used != c->lst.total_used) {
ubifs_err("bad overall accounting"); ubifs_err("bad overall accounting");
ubifs_err("calculated: empty_lebs %d, idx_lebs %d, " ubifs_err("calculated: empty_lebs %d, idx_lebs %d, "
"total_free %lld, total_dirty %lld, total_used %lld", "total_free %lld, total_dirty %lld, total_used %lld",
lst->empty_lebs, lst->idx_lebs, lst->total_free, lst.empty_lebs, lst.idx_lebs, lst.total_free,
lst->total_dirty, lst->total_used); lst.total_dirty, lst.total_used);
ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, " ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, "
"total_free %lld, total_dirty %lld, total_used %lld", "total_free %lld, total_dirty %lld, total_used %lld",
c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free, c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free,
...@@ -1325,11 +1300,11 @@ int dbg_check_lprops(struct ubifs_info *c) ...@@ -1325,11 +1300,11 @@ int dbg_check_lprops(struct ubifs_info *c)
goto out; goto out;
} }
if (lst->total_dead != c->lst.total_dead || if (lst.total_dead != c->lst.total_dead ||
lst->total_dark != c->lst.total_dark) { lst.total_dark != c->lst.total_dark) {
ubifs_err("bad dead/dark space accounting"); ubifs_err("bad dead/dark space accounting");
ubifs_err("calculated: total_dead %lld, total_dark %lld", ubifs_err("calculated: total_dead %lld, total_dark %lld",
lst->total_dead, lst->total_dark); lst.total_dead, lst.total_dark);
ubifs_err("read from lprops: total_dead %lld, total_dark %lld", ubifs_err("read from lprops: total_dead %lld, total_dark %lld",
c->lst.total_dead, c->lst.total_dark); c->lst.total_dead, c->lst.total_dark);
err = -EINVAL; err = -EINVAL;
......
...@@ -29,6 +29,12 @@ ...@@ -29,6 +29,12 @@
#include <linux/slab.h> #include <linux/slab.h>
#include "ubifs.h" #include "ubifs.h"
#ifdef CONFIG_UBIFS_FS_DEBUG
static int dbg_populate_lsave(struct ubifs_info *c);
#else
#define dbg_populate_lsave(c) 0
#endif
/** /**
* first_dirty_cnode - find first dirty cnode. * first_dirty_cnode - find first dirty cnode.
* @c: UBIFS file-system description object * @c: UBIFS file-system description object
...@@ -586,7 +592,7 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c, ...@@ -586,7 +592,7 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
if (nnode->nbranch[iip].lnum) if (nnode->nbranch[iip].lnum)
break; break;
} }
} while (iip >= UBIFS_LPT_FANOUT); } while (iip >= UBIFS_LPT_FANOUT);
/* Go right */ /* Go right */
nnode = ubifs_get_nnode(c, nnode, iip); nnode = ubifs_get_nnode(c, nnode, iip);
...@@ -815,6 +821,10 @@ static void populate_lsave(struct ubifs_info *c) ...@@ -815,6 +821,10 @@ static void populate_lsave(struct ubifs_info *c)
c->lpt_drty_flgs |= LSAVE_DIRTY; c->lpt_drty_flgs |= LSAVE_DIRTY;
ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz); ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz);
} }
if (dbg_populate_lsave(c))
return;
list_for_each_entry(lprops, &c->empty_list, list) { list_for_each_entry(lprops, &c->empty_list, list) {
c->lsave[cnt++] = lprops->lnum; c->lsave[cnt++] = lprops->lnum;
if (cnt >= c->lsave_cnt) if (cnt >= c->lsave_cnt)
...@@ -1994,4 +2004,47 @@ void dbg_dump_lpt_lebs(const struct ubifs_info *c) ...@@ -1994,4 +2004,47 @@ void dbg_dump_lpt_lebs(const struct ubifs_info *c)
current->pid); current->pid);
} }
/**
* dbg_populate_lsave - debugging version of 'populate_lsave()'
* @c: UBIFS file-system description object
*
* This is a debugging version for 'populate_lsave()' which populates lsave
* with random LEBs instead of useful LEBs, which is good for test coverage.
* Returns zero if lsave has not been populated (this debugging feature is
 * disabled) and non-zero if lsave has been populated.
*/
static int dbg_populate_lsave(struct ubifs_info *c)
{
struct ubifs_lprops *lprops;
struct ubifs_lpt_heap *heap;
int i;
if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
return 0;
if (random32() & 3)
return 0;
for (i = 0; i < c->lsave_cnt; i++)
c->lsave[i] = c->main_first;
list_for_each_entry(lprops, &c->empty_list, list)
c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
list_for_each_entry(lprops, &c->freeable_list, list)
c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
list_for_each_entry(lprops, &c->frdi_idx_list, list)
c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
for (i = 0; i < heap->cnt; i++)
c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
heap = &c->lpt_heap[LPROPS_DIRTY - 1];
for (i = 0; i < heap->cnt; i++)
c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
heap = &c->lpt_heap[LPROPS_FREE - 1];
for (i = 0; i < heap->cnt; i++)
c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
return 1;
}
#endif /* CONFIG_UBIFS_FS_DEBUG */ #endif /* CONFIG_UBIFS_FS_DEBUG */
...@@ -148,7 +148,7 @@ static int validate_master(const struct ubifs_info *c) ...@@ -148,7 +148,7 @@ static int validate_master(const struct ubifs_info *c)
} }
main_sz = (long long)c->main_lebs * c->leb_size; main_sz = (long long)c->main_lebs * c->leb_size;
if (c->old_idx_sz & 7 || c->old_idx_sz >= main_sz) { if (c->bi.old_idx_sz & 7 || c->bi.old_idx_sz >= main_sz) {
err = 9; err = 9;
goto out; goto out;
} }
...@@ -218,7 +218,7 @@ static int validate_master(const struct ubifs_info *c) ...@@ -218,7 +218,7 @@ static int validate_master(const struct ubifs_info *c)
} }
if (c->lst.total_dead + c->lst.total_dark + if (c->lst.total_dead + c->lst.total_dark +
c->lst.total_used + c->old_idx_sz > main_sz) { c->lst.total_used + c->bi.old_idx_sz > main_sz) {
err = 21; err = 21;
goto out; goto out;
} }
...@@ -286,7 +286,7 @@ int ubifs_read_master(struct ubifs_info *c) ...@@ -286,7 +286,7 @@ int ubifs_read_master(struct ubifs_info *c)
c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum); c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum);
c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum); c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum);
c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs); c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs);
c->old_idx_sz = le64_to_cpu(c->mst_node->index_size); c->bi.old_idx_sz = le64_to_cpu(c->mst_node->index_size);
c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum); c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum);
c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs); c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs);
c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum); c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum);
...@@ -305,7 +305,7 @@ int ubifs_read_master(struct ubifs_info *c) ...@@ -305,7 +305,7 @@ int ubifs_read_master(struct ubifs_info *c)
c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead); c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead);
c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark); c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark);
c->calc_idx_sz = c->old_idx_sz; c->calc_idx_sz = c->bi.old_idx_sz;
if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS)) if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS))
c->no_orphs = 1; c->no_orphs = 1;
......
...@@ -340,4 +340,21 @@ static inline void ubifs_release_lprops(struct ubifs_info *c) ...@@ -340,4 +340,21 @@ static inline void ubifs_release_lprops(struct ubifs_info *c)
mutex_unlock(&c->lp_mutex); mutex_unlock(&c->lp_mutex);
} }
/**
* ubifs_next_log_lnum - switch to the next log LEB.
* @c: UBIFS file-system description object
* @lnum: current log LEB
*
* This helper function returns the log LEB number which goes next after LEB
* 'lnum'.
*/
static inline int ubifs_next_log_lnum(const struct ubifs_info *c, int lnum)
{
lnum += 1;
if (lnum > c->log_last)
lnum = UBIFS_LOG_LNUM;
return lnum;
}
#endif /* __UBIFS_MISC_H__ */ #endif /* __UBIFS_MISC_H__ */
...@@ -673,7 +673,8 @@ static int kill_orphans(struct ubifs_info *c) ...@@ -673,7 +673,8 @@ static int kill_orphans(struct ubifs_info *c)
sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
if (IS_ERR(sleb)) { if (IS_ERR(sleb)) {
if (PTR_ERR(sleb) == -EUCLEAN) if (PTR_ERR(sleb) == -EUCLEAN)
sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0); sleb = ubifs_recover_leb(c, lnum, 0,
c->sbuf, 0);
if (IS_ERR(sleb)) { if (IS_ERR(sleb)) {
err = PTR_ERR(sleb); err = PTR_ERR(sleb);
break; break;
......
...@@ -475,7 +475,8 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) ...@@ -475,7 +475,8 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
* @c: UBIFS file-system description object * @c: UBIFS file-system description object
* *
* This function returns a pointer to the superblock node or a negative error * This function returns a pointer to the superblock node or a negative error
 * code. * code. Note, the user of this function is responsible for kfree()'ing the
* returned superblock buffer.
*/ */
struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
{ {
...@@ -616,6 +617,7 @@ int ubifs_read_superblock(struct ubifs_info *c) ...@@ -616,6 +617,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran); c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran);
memcpy(&c->uuid, &sup->uuid, 16); memcpy(&c->uuid, &sup->uuid, 16);
c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT); c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT);
c->space_fixup = !!(sup_flags & UBIFS_FLG_SPACE_FIXUP);
/* Automatically increase file system size to the maximum size */ /* Automatically increase file system size to the maximum size */
c->old_leb_cnt = c->leb_cnt; c->old_leb_cnt = c->leb_cnt;
...@@ -650,3 +652,152 @@ int ubifs_read_superblock(struct ubifs_info *c) ...@@ -650,3 +652,152 @@ int ubifs_read_superblock(struct ubifs_info *c)
kfree(sup); kfree(sup);
return err; return err;
} }
/**
* fixup_leb - fixup/unmap an LEB containing free space.
* @c: UBIFS file-system description object
* @lnum: the LEB number to fix up
* @len: number of used bytes in LEB (starting at offset 0)
*
* This function reads the contents of the given LEB number @lnum, then fixes
* it up, so that empty min. I/O units at the end of the LEB are actually erased on
* flash (rather than being programmed with all-0xff data). If the LEB is completely
* empty, it is simply unmapped.
*/
static int fixup_leb(struct ubifs_info *c, int lnum, int len)
{
int err;
ubifs_assert(len >= 0);
ubifs_assert(len % c->min_io_size == 0);
ubifs_assert(len < c->leb_size);
if (len == 0) {
dbg_mnt("unmap empty LEB %d", lnum);
return ubi_leb_unmap(c->ubi, lnum);
}
dbg_mnt("fixup LEB %d, data len %d", lnum, len);
err = ubi_read(c->ubi, lnum, c->sbuf, 0, len);
if (err)
return err;
return ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
}
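The function above makes a simple decision: a completely empty LEB is unmapped, anything else is read back and atomically re-written so that its unused tail becomes genuinely erased. The following stand-alone sketch models that decision in plain user space; the "LEB" buffer and helpers below are stand-ins for illustration only, not the UBI kernel API.
/* Simplified user-space model of the fix-up decision above.
 * The "LEB" buffer and helpers are stand-ins for illustration only,
 * not the UBI kernel API.
 */
#include <stdio.h>
#include <string.h>

#define LEB_SIZE	128	/* assumed LEB size for the model */

struct model_leb {
	int mapped;
	unsigned char data[LEB_SIZE];
};

/* "Unmap" the LEB: afterwards it reads back as erased (all 0xff). */
static void leb_unmap(struct model_leb *leb)
{
	leb->mapped = 0;
	memset(leb->data, 0xff, LEB_SIZE);
}

/* Atomically replace the LEB contents with @len bytes of @buf. */
static void leb_change(struct model_leb *leb, const unsigned char *buf, int len)
{
	leb->mapped = 1;
	memset(leb->data, 0xff, LEB_SIZE);	/* the tail stays truly erased */
	memcpy(leb->data, buf, len);
}

/* Mirror of the decision in fixup_leb(): unmap if empty, else re-write. */
static void model_fixup_leb(struct model_leb *leb, int len)
{
	unsigned char sbuf[LEB_SIZE];

	if (len == 0) {
		leb_unmap(leb);
		return;
	}
	memcpy(sbuf, leb->data, len);	/* "read" the used part */
	leb_change(leb, sbuf, len);	/* re-write it atomically */
}

int main(void)
{
	struct model_leb leb = { .mapped = 1 };

	memcpy(leb.data, "some node data", 14);
	model_fixup_leb(&leb, 32);	/* 32 used bytes, the rest erased */
	printf("mapped=%d, byte 32 is 0x%02x\n", leb.mapped, leb.data[32]);
	return 0;
}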
/**
* fixup_free_space - find & remap all LEBs containing free space.
* @c: UBIFS file-system description object
*
* This function walks through all LEBs in the filesystem and fixes up those
* containing free/empty space.
*/
static int fixup_free_space(struct ubifs_info *c)
{
int lnum, err = 0;
struct ubifs_lprops *lprops;
ubifs_get_lprops(c);
/* Fixup LEBs in the master area */
for (lnum = UBIFS_MST_LNUM; lnum < UBIFS_LOG_LNUM; lnum++) {
err = fixup_leb(c, lnum, c->mst_offs + c->mst_node_alsz);
if (err)
goto out;
}
/* Unmap unused log LEBs */
lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
while (lnum != c->ltail_lnum) {
err = fixup_leb(c, lnum, 0);
if (err)
goto out;
lnum = ubifs_next_log_lnum(c, lnum);
}
/* Fixup the current log head */
err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
if (err)
goto out;
/* Fixup LEBs in the LPT area */
for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
int free = c->ltab[lnum - c->lpt_first].free;
if (free > 0) {
err = fixup_leb(c, lnum, c->leb_size - free);
if (err)
goto out;
}
}
/* Unmap LEBs in the orphans area */
for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
err = fixup_leb(c, lnum, 0);
if (err)
goto out;
}
/* Fixup LEBs in the main area */
for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
lprops = ubifs_lpt_lookup(c, lnum);
if (IS_ERR(lprops)) {
err = PTR_ERR(lprops);
goto out;
}
if (lprops->free > 0) {
err = fixup_leb(c, lnum, c->leb_size - lprops->free);
if (err)
goto out;
}
}
out:
ubifs_release_lprops(c);
return err;
}
/**
* ubifs_fixup_free_space - find & fix all LEBs with free space.
* @c: UBIFS file-system description object
*
* This function fixes up LEBs containing free space on first mount, if the
* appropriate flag was set when the FS was created. Each LEB with one or more
* empty min. I/O units (i.e. free-space-count > 0) is re-written, to make sure
* the free space is actually erased. E.g., this is necessary for some NAND
* chips, since the free space may have been programmed like real "0xff" data
* (generating a non-0xff ECC), causing future writes to the not-really-erased
* NAND pages to behave badly. After the space is fixed up, the superblock flag
* is cleared, so that this is skipped for all future mounts.
*/
int ubifs_fixup_free_space(struct ubifs_info *c)
{
int err;
struct ubifs_sb_node *sup;
ubifs_assert(c->space_fixup);
ubifs_assert(!c->ro_mount);
ubifs_msg("start fixing up free space");
err = fixup_free_space(c);
if (err)
return err;
sup = ubifs_read_sb_node(c);
if (IS_ERR(sup))
return PTR_ERR(sup);
/* Free-space fixup is no longer required */
c->space_fixup = 0;
sup->flags &= cpu_to_le32(~UBIFS_FLG_SPACE_FIXUP);
err = ubifs_write_sb_node(c, sup);
kfree(sup);
if (err)
return err;
ubifs_msg("free space fixup complete");
return err;
}
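The flag handling at the end of the function is a plain bit operation on the on-media flags field, which is stored little-endian and hence goes through cpu_to_le32() in the kernel code. Below is a minimal stand-alone sketch of the same clear-one-flag-keep-the-rest step, using host-order integers for simplicity.
/* Minimal sketch of clearing one superblock flag while keeping the rest.
 * Host-order integers are used here for simplicity; the real on-media
 * field is little-endian and goes through cpu_to_le32()/le32_to_cpu().
 */
#include <stdint.h>
#include <stdio.h>

#define FLG_BIGLPT	0x02	/* same bit values as the UBIFS superblock flags */
#define FLG_SPACE_FIXUP	0x04

int main(void)
{
	uint32_t flags = FLG_BIGLPT | FLG_SPACE_FIXUP;	/* as set at mkfs time */

	/* Free-space fix-up has completed, so drop only that one flag. */
	flags &= ~FLG_SPACE_FIXUP;

	printf("space_fixup set: %d\n", !!(flags & FLG_SPACE_FIXUP));	/* 0 */
	printf("big_lpt set:     %d\n", !!(flags & FLG_BIGLPT));	/* 1 */
	return 0;
}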
...@@ -375,7 +375,7 @@ static void ubifs_evict_inode(struct inode *inode) ...@@ -375,7 +375,7 @@ static void ubifs_evict_inode(struct inode *inode)
ubifs_release_dirty_inode_budget(c, ui); ubifs_release_dirty_inode_budget(c, ui);
else { else {
/* We've deleted something - clean the "no space" flags */ /* We've deleted something - clean the "no space" flags */
c->nospace = c->nospace_rp = 0; c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb(); smp_wmb();
} }
done: done:
...@@ -694,11 +694,11 @@ static int init_constants_sb(struct ubifs_info *c) ...@@ -694,11 +694,11 @@ static int init_constants_sb(struct ubifs_info *c)
* be compressed and direntries are of the maximum size. * be compressed and direntries are of the maximum size.
* *
* Note, data, which may be stored in inodes is budgeted separately, so * Note, data, which may be stored in inodes is budgeted separately, so
* it is not included into 'c->inode_budget'. * it is not included into 'c->bi.inode_budget'.
*/ */
c->page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE; c->bi.page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE;
c->inode_budget = UBIFS_INO_NODE_SZ; c->bi.inode_budget = UBIFS_INO_NODE_SZ;
c->dent_budget = UBIFS_MAX_DENT_NODE_SZ; c->bi.dent_budget = UBIFS_MAX_DENT_NODE_SZ;
/* /*
* When the amount of flash space used by buds becomes * When the amount of flash space used by buds becomes
...@@ -742,7 +742,7 @@ static void init_constants_master(struct ubifs_info *c) ...@@ -742,7 +742,7 @@ static void init_constants_master(struct ubifs_info *c)
{ {
long long tmp64; long long tmp64;
c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
c->report_rp_size = ubifs_reported_space(c, c->rp_size); c->report_rp_size = ubifs_reported_space(c, c->rp_size);
/* /*
...@@ -1144,8 +1144,8 @@ static int check_free_space(struct ubifs_info *c) ...@@ -1144,8 +1144,8 @@ static int check_free_space(struct ubifs_info *c)
{ {
ubifs_assert(c->dark_wm > 0); ubifs_assert(c->dark_wm > 0);
if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
ubifs_err("insufficient free space to mount in read/write mode"); ubifs_err("insufficient free space to mount in R/W mode");
dbg_dump_budg(c); dbg_dump_budg(c, &c->bi);
dbg_dump_lprops(c); dbg_dump_lprops(c);
return -ENOSPC; return -ENOSPC;
} }
...@@ -1304,7 +1304,7 @@ static int mount_ubifs(struct ubifs_info *c) ...@@ -1304,7 +1304,7 @@ static int mount_ubifs(struct ubifs_info *c)
if (err) if (err)
goto out_lpt; goto out_lpt;
err = dbg_check_idx_size(c, c->old_idx_sz); err = dbg_check_idx_size(c, c->bi.old_idx_sz);
if (err) if (err)
goto out_lpt; goto out_lpt;
...@@ -1313,7 +1313,7 @@ static int mount_ubifs(struct ubifs_info *c) ...@@ -1313,7 +1313,7 @@ static int mount_ubifs(struct ubifs_info *c)
goto out_journal; goto out_journal;
/* Calculate 'min_idx_lebs' after journal replay */ /* Calculate 'min_idx_lebs' after journal replay */
c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount); err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount);
if (err) if (err)
...@@ -1396,6 +1396,12 @@ static int mount_ubifs(struct ubifs_info *c) ...@@ -1396,6 +1396,12 @@ static int mount_ubifs(struct ubifs_info *c)
} else } else
ubifs_assert(c->lst.taken_empty_lebs > 0); ubifs_assert(c->lst.taken_empty_lebs > 0);
if (!c->ro_mount && c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
goto out_infos;
}
err = dbg_check_filesystem(c); err = dbg_check_filesystem(c);
if (err) if (err)
goto out_infos; goto out_infos;
...@@ -1442,7 +1448,8 @@ static int mount_ubifs(struct ubifs_info *c) ...@@ -1442,7 +1448,8 @@ static int mount_ubifs(struct ubifs_info *c)
c->main_lebs, c->main_first, c->leb_cnt - 1); c->main_lebs, c->main_first, c->leb_cnt - 1);
dbg_msg("index LEBs: %d", c->lst.idx_lebs); dbg_msg("index LEBs: %d", c->lst.idx_lebs);
dbg_msg("total index bytes: %lld (%lld KiB, %lld MiB)", dbg_msg("total index bytes: %lld (%lld KiB, %lld MiB)",
c->old_idx_sz, c->old_idx_sz >> 10, c->old_idx_sz >> 20); c->bi.old_idx_sz, c->bi.old_idx_sz >> 10,
c->bi.old_idx_sz >> 20);
dbg_msg("key hash type: %d", c->key_hash_type); dbg_msg("key hash type: %d", c->key_hash_type);
dbg_msg("tree fanout: %d", c->fanout); dbg_msg("tree fanout: %d", c->fanout);
dbg_msg("reserved GC LEB: %d", c->gc_lnum); dbg_msg("reserved GC LEB: %d", c->gc_lnum);
...@@ -1456,7 +1463,7 @@ static int mount_ubifs(struct ubifs_info *c) ...@@ -1456,7 +1463,7 @@ static int mount_ubifs(struct ubifs_info *c)
dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu", dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu",
UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ); UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu, idx %d", dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu, idx %d",
UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ, UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout)); UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout));
dbg_msg("dead watermark: %d", c->dead_wm); dbg_msg("dead watermark: %d", c->dead_wm);
dbg_msg("dark watermark: %d", c->dark_wm); dbg_msg("dark watermark: %d", c->dark_wm);
...@@ -1584,6 +1591,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) ...@@ -1584,6 +1591,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
} }
sup->leb_cnt = cpu_to_le32(c->leb_cnt); sup->leb_cnt = cpu_to_le32(c->leb_cnt);
err = ubifs_write_sb_node(c, sup); err = ubifs_write_sb_node(c, sup);
kfree(sup);
if (err) if (err)
goto out; goto out;
} }
...@@ -1684,6 +1692,13 @@ static int ubifs_remount_rw(struct ubifs_info *c) ...@@ -1684,6 +1692,13 @@ static int ubifs_remount_rw(struct ubifs_info *c)
*/ */
err = dbg_check_space_info(c); err = dbg_check_space_info(c);
} }
if (c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
goto out;
}
mutex_unlock(&c->umount_mutex); mutex_unlock(&c->umount_mutex);
return err; return err;
...@@ -1766,10 +1781,9 @@ static void ubifs_put_super(struct super_block *sb) ...@@ -1766,10 +1781,9 @@ static void ubifs_put_super(struct super_block *sb)
* to write them back because of I/O errors. * to write them back because of I/O errors.
*/ */
if (!c->ro_error) { if (!c->ro_error) {
ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0); ubifs_assert(c->bi.idx_growth == 0);
ubifs_assert(c->budg_idx_growth == 0); ubifs_assert(c->bi.dd_growth == 0);
ubifs_assert(c->budg_dd_growth == 0); ubifs_assert(c->bi.data_growth == 0);
ubifs_assert(c->budg_data_growth == 0);
} }
/* /*
......
...@@ -2557,11 +2557,11 @@ int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key, ...@@ -2557,11 +2557,11 @@ int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
if (err) { if (err) {
/* Ensure the znode is dirtied */ /* Ensure the znode is dirtied */
if (znode->cnext || !ubifs_zn_dirty(znode)) { if (znode->cnext || !ubifs_zn_dirty(znode)) {
znode = dirty_cow_bottom_up(c, znode); znode = dirty_cow_bottom_up(c, znode);
if (IS_ERR(znode)) { if (IS_ERR(znode)) {
err = PTR_ERR(znode); err = PTR_ERR(znode);
goto out_unlock; goto out_unlock;
} }
} }
err = tnc_delete(c, znode, n); err = tnc_delete(c, znode, n);
} }
......
...@@ -377,15 +377,13 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) ...@@ -377,15 +377,13 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
c->gap_lebs = NULL; c->gap_lebs = NULL;
return err; return err;
} }
if (!dbg_force_in_the_gaps_enabled) { if (!dbg_force_in_the_gaps_enabled()) {
/* /*
* Do not print scary warnings if the debugging * Do not print scary warnings if the debugging
* option which forces in-the-gaps is enabled. * option which forces in-the-gaps is enabled.
*/ */
ubifs_err("out of space"); ubifs_warn("out of space");
spin_lock(&c->space_lock); dbg_dump_budg(c, &c->bi);
dbg_dump_budg(c);
spin_unlock(&c->space_lock);
dbg_dump_lprops(c); dbg_dump_lprops(c);
} }
/* Try to commit anyway */ /* Try to commit anyway */
...@@ -796,16 +794,16 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot) ...@@ -796,16 +794,16 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
spin_lock(&c->space_lock); spin_lock(&c->space_lock);
/* /*
* Although we have not finished committing yet, update size of the * Although we have not finished committing yet, update size of the
* committed index ('c->old_idx_sz') and zero out the index growth * committed index ('c->bi.old_idx_sz') and zero out the index growth
* budget. It is OK to do this now, because we've reserved all the * budget. It is OK to do this now, because we've reserved all the
* space which is needed to commit the index, and it is save for the * space which is needed to commit the index, and it is save for the
* budgeting subsystem to assume the index is already committed, * budgeting subsystem to assume the index is already committed,
* even though it is not. * even though it is not.
*/ */
ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
c->old_idx_sz = c->calc_idx_sz; c->bi.old_idx_sz = c->calc_idx_sz;
c->budg_uncommitted_idx = 0; c->bi.uncommitted_idx = 0;
c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
spin_unlock(&c->space_lock); spin_unlock(&c->space_lock);
mutex_unlock(&c->tnc_mutex); mutex_unlock(&c->tnc_mutex);
......
...@@ -408,9 +408,11 @@ enum { ...@@ -408,9 +408,11 @@ enum {
* Superblock flags. * Superblock flags.
* *
* UBIFS_FLG_BIGLPT: if "big" LPT model is used if set * UBIFS_FLG_BIGLPT: if "big" LPT model is used if set
* UBIFS_FLG_SPACE_FIXUP: first-mount "fixup" of free space within LEBs needed
*/ */
enum { enum {
UBIFS_FLG_BIGLPT = 0x02, UBIFS_FLG_BIGLPT = 0x02,
UBIFS_FLG_SPACE_FIXUP = 0x04,
}; };
/** /**
...@@ -434,7 +436,7 @@ struct ubifs_ch { ...@@ -434,7 +436,7 @@ struct ubifs_ch {
__u8 node_type; __u8 node_type;
__u8 group_type; __u8 group_type;
__u8 padding[2]; __u8 padding[2];
} __attribute__ ((packed)); } __packed;
/** /**
* union ubifs_dev_desc - device node descriptor. * union ubifs_dev_desc - device node descriptor.
...@@ -448,7 +450,7 @@ struct ubifs_ch { ...@@ -448,7 +450,7 @@ struct ubifs_ch {
union ubifs_dev_desc { union ubifs_dev_desc {
__le32 new; __le32 new;
__le64 huge; __le64 huge;
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_ino_node - inode node. * struct ubifs_ino_node - inode node.
...@@ -509,7 +511,7 @@ struct ubifs_ino_node { ...@@ -509,7 +511,7 @@ struct ubifs_ino_node {
__le16 compr_type; __le16 compr_type;
__u8 padding2[26]; /* Watch 'zero_ino_node_unused()' if changing! */ __u8 padding2[26]; /* Watch 'zero_ino_node_unused()' if changing! */
__u8 data[]; __u8 data[];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_dent_node - directory entry node. * struct ubifs_dent_node - directory entry node.
...@@ -534,7 +536,7 @@ struct ubifs_dent_node { ...@@ -534,7 +536,7 @@ struct ubifs_dent_node {
__le16 nlen; __le16 nlen;
__u8 padding2[4]; /* Watch 'zero_dent_node_unused()' if changing! */ __u8 padding2[4]; /* Watch 'zero_dent_node_unused()' if changing! */
__u8 name[]; __u8 name[];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_data_node - data node. * struct ubifs_data_node - data node.
...@@ -555,7 +557,7 @@ struct ubifs_data_node { ...@@ -555,7 +557,7 @@ struct ubifs_data_node {
__le16 compr_type; __le16 compr_type;
__u8 padding[2]; /* Watch 'zero_data_node_unused()' if changing! */ __u8 padding[2]; /* Watch 'zero_data_node_unused()' if changing! */
__u8 data[]; __u8 data[];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_trun_node - truncation node. * struct ubifs_trun_node - truncation node.
...@@ -575,7 +577,7 @@ struct ubifs_trun_node { ...@@ -575,7 +577,7 @@ struct ubifs_trun_node {
__u8 padding[12]; /* Watch 'zero_trun_node_unused()' if changing! */ __u8 padding[12]; /* Watch 'zero_trun_node_unused()' if changing! */
__le64 old_size; __le64 old_size;
__le64 new_size; __le64 new_size;
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_pad_node - padding node. * struct ubifs_pad_node - padding node.
...@@ -586,7 +588,7 @@ struct ubifs_trun_node { ...@@ -586,7 +588,7 @@ struct ubifs_trun_node {
struct ubifs_pad_node { struct ubifs_pad_node {
struct ubifs_ch ch; struct ubifs_ch ch;
__le32 pad_len; __le32 pad_len;
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_sb_node - superblock node. * struct ubifs_sb_node - superblock node.
...@@ -644,7 +646,7 @@ struct ubifs_sb_node { ...@@ -644,7 +646,7 @@ struct ubifs_sb_node {
__u8 uuid[16]; __u8 uuid[16];
__le32 ro_compat_version; __le32 ro_compat_version;
__u8 padding2[3968]; __u8 padding2[3968];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_mst_node - master node. * struct ubifs_mst_node - master node.
...@@ -711,7 +713,7 @@ struct ubifs_mst_node { ...@@ -711,7 +713,7 @@ struct ubifs_mst_node {
__le32 idx_lebs; __le32 idx_lebs;
__le32 leb_cnt; __le32 leb_cnt;
__u8 padding[344]; __u8 padding[344];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_ref_node - logical eraseblock reference node. * struct ubifs_ref_node - logical eraseblock reference node.
...@@ -727,7 +729,7 @@ struct ubifs_ref_node { ...@@ -727,7 +729,7 @@ struct ubifs_ref_node {
__le32 offs; __le32 offs;
__le32 jhead; __le32 jhead;
__u8 padding[28]; __u8 padding[28];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_branch - key/reference/length branch * struct ubifs_branch - key/reference/length branch
...@@ -741,7 +743,7 @@ struct ubifs_branch { ...@@ -741,7 +743,7 @@ struct ubifs_branch {
__le32 offs; __le32 offs;
__le32 len; __le32 len;
__u8 key[]; __u8 key[];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_idx_node - indexing node. * struct ubifs_idx_node - indexing node.
...@@ -755,7 +757,7 @@ struct ubifs_idx_node { ...@@ -755,7 +757,7 @@ struct ubifs_idx_node {
__le16 child_cnt; __le16 child_cnt;
__le16 level; __le16 level;
__u8 branches[]; __u8 branches[];
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_cs_node - commit start node. * struct ubifs_cs_node - commit start node.
...@@ -765,7 +767,7 @@ struct ubifs_idx_node { ...@@ -765,7 +767,7 @@ struct ubifs_idx_node {
struct ubifs_cs_node { struct ubifs_cs_node {
struct ubifs_ch ch; struct ubifs_ch ch;
__le64 cmt_no; __le64 cmt_no;
} __attribute__ ((packed)); } __packed;
/** /**
* struct ubifs_orph_node - orphan node. * struct ubifs_orph_node - orphan node.
...@@ -777,6 +779,6 @@ struct ubifs_orph_node { ...@@ -777,6 +779,6 @@ struct ubifs_orph_node {
struct ubifs_ch ch; struct ubifs_ch ch;
__le64 cmt_no; __le64 cmt_no;
__le64 inos[]; __le64 inos[];
} __attribute__ ((packed)); } __packed;
#endif /* __UBIFS_MEDIA_H__ */ #endif /* __UBIFS_MEDIA_H__ */
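The repeated change from __attribute__ ((packed)) to __packed throughout this header is purely a spelling cleanup: __packed is the kernel's shorthand macro for the same GCC attribute, which stops the compiler from inserting padding and so keeps the on-flash layout of these nodes fixed. A small user-space demonstration of the effect follows; the structure in it is a made-up example, not a UBIFS node.
/* Stand-alone demonstration of the packed attribute (user space). */
#include <stdio.h>
#include <stdint.h>

/* In the kernel, __packed is simply a shorthand for this attribute. */
#define __packed __attribute__((packed))

struct padded {
	uint8_t node_type;
	uint32_t len;
};

struct on_media {
	uint8_t node_type;
	uint32_t len;
} __packed;

int main(void)
{
	/* Typically prints 8 vs 5: packing removes the alignment padding,
	 * which is what keeps the on-flash layout stable across compilers. */
	printf("padded=%zu packed=%zu\n",
	       sizeof(struct padded), sizeof(struct on_media));
	return 0;
}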
...@@ -389,9 +389,9 @@ struct ubifs_gced_idx_leb { ...@@ -389,9 +389,9 @@ struct ubifs_gced_idx_leb {
* The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses * The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses
* @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot * @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot
* make sure @inode->i_size is always changed under @ui_mutex, because it * make sure @inode->i_size is always changed under @ui_mutex, because it
* cannot call 'truncate_setsize()' with @ui_mutex locked, because it would deadlock * cannot call 'truncate_setsize()' with @ui_mutex locked, because it would
* with 'ubifs_writepage()' (see file.c). All the other inode fields are * deadlock with 'ubifs_writepage()' (see file.c). All the other inode fields
* changed under @ui_mutex, so they do not need "shadow" fields. Note, one * are changed under @ui_mutex, so they do not need "shadow" fields. Note, one
* could consider to rework locking and base it on "shadow" fields. * could consider to rework locking and base it on "shadow" fields.
*/ */
struct ubifs_inode { struct ubifs_inode {
...@@ -937,6 +937,40 @@ struct ubifs_mount_opts { ...@@ -937,6 +937,40 @@ struct ubifs_mount_opts {
unsigned int compr_type:2; unsigned int compr_type:2;
}; };
/**
* struct ubifs_budg_info - UBIFS budgeting information.
* @idx_growth: amount of bytes budgeted for index growth
* @data_growth: amount of bytes budgeted for cached data
* @dd_growth: amount of bytes budgeted for cached data that will make
* other data dirty
* @uncommitted_idx: amount of bytes that were budgeted for growth of the index, but
* which still have to be taken into account because the index
* has not been committed so far
* @old_idx_sz: size of index on flash
* @min_idx_lebs: minimum number of LEBs required for the index
* @nospace: non-zero if the file-system does not have flash space (used as
* optimization)
* @nospace_rp: the same as @nospace, but additionally means that even reserved
* pool is full
* @page_budget: budget for a page (constant, never changed after mount)
* @inode_budget: budget for an inode (constant, never changed after mount)
* @dent_budget: budget for a directory entry (constant, never changed after
* mount)
*/
struct ubifs_budg_info {
long long idx_growth;
long long data_growth;
long long dd_growth;
long long uncommitted_idx;
unsigned long long old_idx_sz;
int min_idx_lebs;
unsigned int nospace:1;
unsigned int nospace_rp:1;
int page_budget;
int inode_budget;
int dent_budget;
};
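The constant per-element budgets at the bottom of this structure are combined at run time to reserve worst-case space before an operation dirties anything. The sketch below is only a rough user-space illustration of that idea: the field names mirror the structure above, but the numbers and the way they are summed are assumptions for the example, not the actual UBIFS budgeting algorithm.
/* Rough user-space illustration of combining the per-element budgets.
 * The numbers and the way they are summed are assumptions for this
 * example only -- the real UBIFS budgeting code is far more involved.
 */
#include <stdio.h>

struct budg_info_model {
	long long data_growth;	/* bytes budgeted for cached data */
	int page_budget;	/* constant per-page budget */
	int inode_budget;	/* constant per-inode budget */
	int dent_budget;	/* constant per-dentry budget */
};

int main(void)
{
	struct budg_info_model bi = {
		.page_budget  = 4096,	/* assumed values, set once at mount */
		.inode_budget = 160,
		.dent_budget  = 304,
	};

	/*
	 * Budget a hypothetical operation that creates one directory entry,
	 * dirties one inode and dirties two pages of data.
	 */
	bi.data_growth += bi.dent_budget + bi.inode_budget + 2LL * bi.page_budget;

	printf("data_growth after the request: %lld bytes\n", bi.data_growth);
	return 0;
}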
struct ubifs_debug_info; struct ubifs_debug_info;
/** /**
...@@ -980,6 +1014,7 @@ struct ubifs_debug_info; ...@@ -980,6 +1014,7 @@ struct ubifs_debug_info;
* @cmt_wq: wait queue to sleep on if the log is full and a commit is running * @cmt_wq: wait queue to sleep on if the log is full and a commit is running
* *
* @big_lpt: flag that LPT is too big to write whole during commit * @big_lpt: flag that LPT is too big to write whole during commit
* @space_fixup: flag indicating that free space in LEBs needs to be cleaned up
* @no_chk_data_crc: do not check CRCs when reading data nodes (except during * @no_chk_data_crc: do not check CRCs when reading data nodes (except during
* recovery) * recovery)
* @bulk_read: enable bulk-reads * @bulk_read: enable bulk-reads
...@@ -1057,32 +1092,14 @@ struct ubifs_debug_info; ...@@ -1057,32 +1092,14 @@ struct ubifs_debug_info;
* @dirty_zn_cnt: number of dirty znodes * @dirty_zn_cnt: number of dirty znodes
* @clean_zn_cnt: number of clean znodes * @clean_zn_cnt: number of clean znodes
* *
* @budg_idx_growth: amount of bytes budgeted for index growth * @space_lock: protects @bi and @lst
* @budg_data_growth: amount of bytes budgeted for cached data * @lst: lprops statistics
* @budg_dd_growth: amount of bytes budgeted for cached data that will make * @bi: budgeting information
* other data dirty
* @budg_uncommitted_idx: amount of bytes were budgeted for growth of the index,
* but which still have to be taken into account because
* the index has not been committed so far
* @space_lock: protects @budg_idx_growth, @budg_data_growth, @budg_dd_growth,
* @budg_uncommited_idx, @min_idx_lebs, @old_idx_sz, @lst,
* @nospace, and @nospace_rp;
* @min_idx_lebs: minimum number of LEBs required for the index
* @old_idx_sz: size of index on flash
* @calc_idx_sz: temporary variable which is used to calculate new index size * @calc_idx_sz: temporary variable which is used to calculate new index size
* (contains accurate new index size at end of TNC commit start) * (contains accurate new index size at end of TNC commit start)
* @lst: lprops statistics
* @nospace: non-zero if the file-system does not have flash space (used as
* optimization)
* @nospace_rp: the same as @nospace, but additionally means that even reserved
* pool is full
*
* @page_budget: budget for a page
* @inode_budget: budget for an inode
* @dent_budget: budget for a directory entry
* *
* @ref_node_alsz: size of the LEB reference node aligned to the min. flash * @ref_node_alsz: size of the LEB reference node aligned to the min. flash
* I/O unit * I/O unit
* @mst_node_alsz: master node aligned size * @mst_node_alsz: master node aligned size
* @min_idx_node_sz: minimum indexing node aligned on 8-bytes boundary * @min_idx_node_sz: minimum indexing node aligned on 8-bytes boundary
* @max_idx_node_sz: maximum indexing node aligned on 8-bytes boundary * @max_idx_node_sz: maximum indexing node aligned on 8-bytes boundary
...@@ -1189,7 +1206,6 @@ struct ubifs_debug_info; ...@@ -1189,7 +1206,6 @@ struct ubifs_debug_info;
* @replaying: %1 during journal replay * @replaying: %1 during journal replay
* @mounting: %1 while mounting * @mounting: %1 while mounting
* @remounting_rw: %1 while re-mounting from R/O mode to R/W mode * @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
* @replay_tree: temporary tree used during journal replay
* @replay_list: temporary list used during journal replay * @replay_list: temporary list used during journal replay
* @replay_buds: list of buds to replay * @replay_buds: list of buds to replay
* @cs_sqnum: sequence number of first node in the log (commit start node) * @cs_sqnum: sequence number of first node in the log (commit start node)
...@@ -1238,6 +1254,7 @@ struct ubifs_info { ...@@ -1238,6 +1254,7 @@ struct ubifs_info {
wait_queue_head_t cmt_wq; wait_queue_head_t cmt_wq;
unsigned int big_lpt:1; unsigned int big_lpt:1;
unsigned int space_fixup:1;
unsigned int no_chk_data_crc:1; unsigned int no_chk_data_crc:1;
unsigned int bulk_read:1; unsigned int bulk_read:1;
unsigned int default_compr:2; unsigned int default_compr:2;
...@@ -1308,21 +1325,10 @@ struct ubifs_info { ...@@ -1308,21 +1325,10 @@ struct ubifs_info {
atomic_long_t dirty_zn_cnt; atomic_long_t dirty_zn_cnt;
atomic_long_t clean_zn_cnt; atomic_long_t clean_zn_cnt;
long long budg_idx_growth;
long long budg_data_growth;
long long budg_dd_growth;
long long budg_uncommitted_idx;
spinlock_t space_lock; spinlock_t space_lock;
int min_idx_lebs;
unsigned long long old_idx_sz;
unsigned long long calc_idx_sz;
struct ubifs_lp_stats lst; struct ubifs_lp_stats lst;
unsigned int nospace:1; struct ubifs_budg_info bi;
unsigned int nospace_rp:1; unsigned long long calc_idx_sz;
int page_budget;
int inode_budget;
int dent_budget;
int ref_node_alsz; int ref_node_alsz;
int mst_node_alsz; int mst_node_alsz;
...@@ -1430,7 +1436,6 @@ struct ubifs_info { ...@@ -1430,7 +1436,6 @@ struct ubifs_info {
unsigned int replaying:1; unsigned int replaying:1;
unsigned int mounting:1; unsigned int mounting:1;
unsigned int remounting_rw:1; unsigned int remounting_rw:1;
struct rb_root replay_tree;
struct list_head replay_list; struct list_head replay_list;
struct list_head replay_buds; struct list_head replay_buds;
unsigned long long cs_sqnum; unsigned long long cs_sqnum;
...@@ -1628,6 +1633,7 @@ int ubifs_write_master(struct ubifs_info *c); ...@@ -1628,6 +1633,7 @@ int ubifs_write_master(struct ubifs_info *c);
int ubifs_read_superblock(struct ubifs_info *c); int ubifs_read_superblock(struct ubifs_info *c);
struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c); struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c);
int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup); int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup);
int ubifs_fixup_free_space(struct ubifs_info *c);
/* replay.c */ /* replay.c */
int ubifs_validate_entry(struct ubifs_info *c, int ubifs_validate_entry(struct ubifs_info *c,
......
...@@ -80,8 +80,8 @@ enum { ...@@ -80,8 +80,8 @@ enum {
SECURITY_XATTR, SECURITY_XATTR,
}; };
static const struct inode_operations none_inode_operations; static const struct inode_operations empty_iops;
static const struct file_operations none_file_operations; static const struct file_operations empty_fops;
/** /**
* create_xattr - create an extended attribute. * create_xattr - create an extended attribute.
...@@ -131,8 +131,8 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, ...@@ -131,8 +131,8 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
/* Re-define all operations to be "nothing" */ /* Re-define all operations to be "nothing" */
inode->i_mapping->a_ops = &empty_aops; inode->i_mapping->a_ops = &empty_aops;
inode->i_op = &none_inode_operations; inode->i_op = &empty_iops;
inode->i_fop = &none_file_operations; inode->i_fop = &empty_fops;
inode->i_flags |= S_SYNC | S_NOATIME | S_NOCMTIME | S_NOQUOTA; inode->i_flags |= S_SYNC | S_NOATIME | S_NOCMTIME | S_NOQUOTA;
ui = ubifs_inode(inode); ui = ubifs_inode(inode);
......