Commit d7ea196c authored by Anton Altaparmakov

NTFS: Complete "run list" to "runlist" renaming.

Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
parent 4bb5af81
@@ -197,7 +197,7 @@ static int ntfs_read_block(struct page *page)
#ifdef DEBUG
if (unlikely(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)))
panic("NTFS: $MFT/$DATA run list has been unmapped! This is a "
panic("NTFS: $MFT/$DATA runlist has been unmapped! This is a "
"very serious bug! Cannot continue...");
#endif
@@ -252,11 +252,11 @@ static int ntfs_read_block(struct page *page)
/* It is a hole, need to zero it. */
if (lcn == LCN_HOLE)
goto handle_hole;
- /* If first try and run list unmapped, map and retry. */
+ /* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
is_retry = TRUE;
/*
- * Attempt to map run list, dropping lock for
+ * Attempt to map runlist, dropping lock for
* the duration.
*/
up_read(&ni->runlist.lock);
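The map-and-retry pattern in this and the following hunks (drop the runlist lock, map the missing fragment, retake the lock, redo the vcn to lcn lookup once) can be modelled in userspace roughly as below. This is a minimal sketch only: the entry layout, the negative lcn code, the map_runlist() helper and the use of a pthread rwlock in place of the inode's runlist lock are all illustrative assumptions, not the driver's code.

#include <pthread.h>
#include <stdio.h>

typedef long long VCN;
typedef long long LCN;

#define LCN_RL_NOT_MAPPED (-2LL)	/* illustrative "not mapped yet" code */

struct rl_entry { VCN vcn; LCN lcn; VCN length; };	/* length == 0 terminates */

static struct rl_entry rl[] = {
	{ 0, LCN_RL_NOT_MAPPED, 16 },	/* this fragment has not been mapped yet */
	{ 16, 0, 0 },			/* terminator */
};
static pthread_rwlock_t rl_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for mapping the runlist fragment covering @vcn (would do I/O). */
static int map_runlist(VCN vcn)
{
	(void)vcn;
	pthread_rwlock_wrlock(&rl_lock);
	rl[0].lcn = 100;		/* pretend the mapping pairs gave lcn 100 */
	pthread_rwlock_unlock(&rl_lock);
	return 0;
}

/* Look up @vcn, mapping the runlist and retrying once if it was unmapped. */
static LCN vcn_to_lcn(VCN vcn)
{
	int is_retry = 0;
	LCN lcn;

	pthread_rwlock_rdlock(&rl_lock);
retry:
	lcn = (vcn >= rl[0].vcn && vcn < rl[0].vcn + rl[0].length) ?
			rl[0].lcn : LCN_RL_NOT_MAPPED;
	/* If first try and runlist unmapped, map and retry. */
	if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
		is_retry = 1;
		/* Attempt to map runlist, dropping lock for the duration. */
		pthread_rwlock_unlock(&rl_lock);
		if (!map_runlist(vcn)) {
			pthread_rwlock_rdlock(&rl_lock);
			goto retry;
		}
		pthread_rwlock_rdlock(&rl_lock);	/* map failed, keep lcn as is */
	}
	pthread_rwlock_unlock(&rl_lock);
	return lcn;
}

int main(void)
{
	printf("vcn 3 -> lcn %lld\n", vcn_to_lcn(3));	/* prints 100 */
	return 0;
}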
@@ -659,11 +659,11 @@ static int ntfs_write_block(struct writeback_control *wbc, struct page *page)
err = -EOPNOTSUPP;
break;
}
- /* If first try and run list unmapped, map and retry. */
+ /* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
is_retry = TRUE;
/*
- * Attempt to map run list, dropping lock for
+ * Attempt to map runlist, dropping lock for
* the duration.
*/
up_read(&ni->runlist.lock);
@@ -1439,7 +1439,7 @@ static int ntfs_prepare_nonresident_write(struct page *page,
lcn == LCN_RL_NOT_MAPPED) {
is_retry = TRUE;
/*
- * Attempt to map run list, dropping
+ * Attempt to map runlist, dropping
* lock for the duration.
*/
up_read(&ni->runlist.lock);
@@ -575,7 +575,7 @@ int ntfs_read_compressed_block(struct page *page)
}
/*
- * We have the run list, and all the destination pages we need to fill.
+ * We have the runlist, and all the destination pages we need to fill.
* Now read the first compression block.
*/
cur_page = 0;
@@ -617,7 +617,7 @@ int ntfs_read_compressed_block(struct page *page)
goto rl_err;
is_retry = TRUE;
/*
- * Attempt to map run list, dropping lock for the
+ * Attempt to map runlist, dropping lock for the
* duration.
*/
up_read(&ni->runlist.lock);
@@ -132,7 +132,7 @@ void __ntfs_debug (const char *file, int line, const char *function,
spin_unlock(&err_buf_lock);
}
- /* Dump a run list. Caller has to provide synchronisation for @rl. */
+ /* Dump a runlist. Caller has to provide synchronisation for @rl. */
void ntfs_debug_dump_runlist(const runlist_element *rl)
{
int i;
@@ -141,7 +141,7 @@ void ntfs_debug_dump_runlist(const runlist_element *rl)
if (!debug_msgs)
return;
printk(KERN_DEBUG "NTFS-fs DEBUG: Dumping run list (values "
printk(KERN_DEBUG "NTFS-fs DEBUG: Dumping runlist (values "
"in hex):\n");
if (!rl) {
printk(KERN_DEBUG "Run list not present.\n");
@@ -159,12 +159,12 @@ void ntfs_debug_dump_runlist(const runlist_element *rl)
printk(KERN_DEBUG "%-16Lx %s %-16Lx%s\n",
(rl + i)->vcn, lcn_str[index],
(rl + i)->length, (rl + i)->length ?
"" : " (run list end)");
"" : " (runlist end)");
} else
printk(KERN_DEBUG "%-16Lx %-16Lx %-16Lx%s\n",
(rl + i)->vcn, (rl + i)->lcn,
(rl + i)->length, (rl + i)->length ?
"" : " (run list end)");
"" : " (runlist end)");
if (!(rl + i)->length)
break;
}
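A compact userspace model of what the dump loop above prints: each runlist element carries a vcn, an lcn and a length, negative lcns stand for special cases such as a hole, and a zero length marks the terminating element. The numeric codes and names here are illustrative assumptions, not the driver's definitions.

#include <stdio.h>

typedef long long VCN;
typedef long long LCN;

struct rl_entry { VCN vcn; LCN lcn; VCN length; };

/* Illustrative names for negative lcn codes (values are assumptions). */
static const char *lcn_name(LCN lcn)
{
	switch (lcn) {
	case -1: return "HOLE            ";
	case -2: return "NOT MAPPED      ";
	case -3: return "ENOENT          ";
	default: return NULL;
	}
}

static void dump_runlist(const struct rl_entry *rl)
{
	int i;

	printf("VCN              LCN              Run length\n");
	for (i = 0; ; i++) {
		const char *name = lcn_name(rl[i].lcn);

		if (name)
			printf("%-16llx %s %-16llx%s\n", rl[i].vcn, name,
					rl[i].length,
					rl[i].length ? "" : " (runlist end)");
		else
			printf("%-16llx %-16llx %-16llx%s\n", rl[i].vcn,
					rl[i].lcn, rl[i].length,
					rl[i].length ? "" : " (runlist end)");
		if (!rl[i].length)	/* zero length marks the terminator */
			break;
	}
}

int main(void)
{
	/* 8 clusters at lcn 0x32, a 4 cluster hole, then the terminator. */
	struct rl_entry rl[] = { { 0, 0x32, 8 }, { 8, -1, 4 }, { 12, -3, 0 } };

	dump_runlist(rl);
	return 0;
}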
@@ -680,7 +680,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
goto unm_err_out;
}
/*
- * Setup the run list. No need for locking as we have
+ * Setup the runlist. No need for locking as we have
* exclusive access to the inode at this time.
*/
ni->attr_list_rl.rl = decompress_mapping_pairs(vol,
@@ -1757,7 +1757,7 @@ int ntfs_read_inode_mount(struct inode *vi)
"You should run chkdsk.");
goto put_err_out;
}
- /* Setup the run list. */
+ /* Setup the runlist. */
ni->attr_list_rl.rl = decompress_mapping_pairs(vol,
ctx->attr, NULL);
if (IS_ERR(ni->attr_list_rl.rl)) {
@@ -1885,7 +1885,7 @@ int ntfs_read_inode_mount(struct inode *vi)
}
/*
* Decompress the mapping pairs array of this extent and merge
- * the result into the existing run list. No need for locking
+ * the result into the existing runlist. No need for locking
* as we have exclusive access to the inode at this time and we
* are a mount in progress task, too.
*/
@@ -2001,7 +2001,7 @@ int ntfs_read_inode_mount(struct inode *vi)
goto put_err_out;
}
if (highest_vcn && highest_vcn != last_vcn - 1) {
ntfs_error(sb, "Failed to load the complete run list "
ntfs_error(sb, "Failed to load the complete runlist "
"for $MFT/$DATA. Driver bug or "
"corrupt $MFT. Run chkdsk.");
ntfs_debug("highest_vcn = 0x%llx, last_vcn - 1 = 0x%llx",
@@ -57,11 +57,11 @@ struct _ntfs_inode {
ntfschar *name; /* Attribute name of this fake inode. */
u32 name_len; /* Attribute name length of this fake inode. */
runlist runlist; /* If state has the NI_NonResident bit set,
- the run list of the unnamed data attribute
+ the runlist of the unnamed data attribute
(if a file) or of the index allocation
attribute (directory) or of the attribute
described by the fake inode (if NInoAttr()).
- If runlist.rl is NULL, the run list has not
+ If runlist.rl is NULL, the runlist has not
been read in yet or has been unmapped. If
NI_NonResident is clear, the attribute is
resident (file and fake inode) or there is
@@ -545,7 +545,7 @@ typedef enum {
* can be stored:
*
* 1) The data in the block is all zero (a sparse block):
- * This is stored as a sparse block in the run list, i.e. the run list
+ * This is stored as a sparse block in the runlist, i.e. the runlist
* entry has length = X and lcn = -1. The mapping pairs array actually
* uses a delta_lcn value length of 0, i.e. delta_lcn is not present at
* all, which is then interpreted by the driver as lcn = -1.
@@ -558,7 +558,7 @@ typedef enum {
* in clusters. I.e. if compression has a small effect so that the
* compressed data still occupies X clusters, then the uncompressed data
* is stored in the block.
- * This case is recognised by the fact that the run list entry has
+ * This case is recognised by the fact that the runlist entry has
* length = X and lcn >= 0. The mapping pairs array stores this as
* normal with a run length of X and some specific delta_lcn, i.e.
* delta_lcn has to be present.
@@ -567,7 +567,7 @@ typedef enum {
* The common case. This case is recognised by the fact that the run
* list entry has length L < X and lcn >= 0. The mapping pairs array
* stores this as normal with a run length of X and some specific
- * delta_lcn, i.e. delta_lcn has to be present. This run list entry is
+ * delta_lcn, i.e. delta_lcn has to be present. This runlist entry is
* immediately followed by a sparse entry with length = X - L and
* lcn = -1. The latter entry is to make up the vcn counting to the
* full compression block size X.
@@ -575,15 +575,15 @@ typedef enum {
* In fact, life is more complicated because adjacent entries of the same type
* can be coalesced. This means that one has to keep track of the number of
* clusters handled and work on a basis of X clusters at a time being one
- * block. An example: if length L > X this means that this particular run list
+ * block. An example: if length L > X this means that this particular runlist
* entry contains a block of length X and part of one or more blocks of length
* L - X. Another example: if length L < X, this does not necessarily mean that
* the block is compressed as it might be that the lcn changes inside the block
- * and hence the following run list entry describes the continuation of the
+ * and hence the following runlist entry describes the continuation of the
* potentially compressed block. The block would be compressed if the
- * following run list entry describes at least X - L sparse clusters, thus
+ * following runlist entry describes at least X - L sparse clusters, thus
* making up the compression block length as described in point 3 above. (Of
- * course, there can be several run list entries with small lengths so that the
+ * course, there can be several runlist entries with small lengths so that the
* sparse entry does not follow the first data containing entry with
* length < X.)
*
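For the simple, non-coalesced case, the three layouts described in this comment can be told apart from the runlist entries covering one compression block of X clusters. A minimal sketch under that assumption (one data entry, optionally followed by one sparse entry); the entry layout and names are illustrative, and the coalescing complications discussed in the last paragraph are ignored:

#include <stdio.h>

typedef long long VCN;
typedef long long LCN;

struct rl_entry { VCN vcn; LCN lcn; VCN length; };

enum cb_kind { CB_SPARSE, CB_UNCOMPRESSED, CB_COMPRESSED };

/*
 * Classify one compression block of @X clusters from the runlist entry
 * describing its start (rl[0]) and, in the compressed case, the sparse
 * entry following it (rl[1]).
 */
static enum cb_kind classify_block(const struct rl_entry *rl, VCN X)
{
	if (rl[0].lcn < 0 && rl[0].length >= X)
		return CB_SPARSE;	/* case 1: all zero, lcn = -1, length = X */
	if (rl[0].length >= X)
		return CB_UNCOMPRESSED;	/* case 2: length = X, lcn >= 0 */
	/* case 3: length L < X, lcn >= 0, then X - L sparse clusters follow */
	return CB_COMPRESSED;
}

int main(void)
{
	const VCN X = 16;	/* clusters per compression block */
	struct rl_entry sparse[]       = { {  0,  -1, 16 } };
	struct rl_entry uncompressed[] = { { 16, 200, 16 } };
	struct rl_entry compressed[]   = { { 32, 300, 10 }, { 42, -1, 6 } };

	printf("%d %d %d\n", classify_block(sparse, X),
			classify_block(uncompressed, X),
			classify_block(compressed, X));	/* prints: 0 1 2 */
	return 0;
}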
@@ -986,7 +986,7 @@ static BOOL check_mft_mirror(ntfs_volume *vol)
ntfs_unmap_page(mft_page);
ntfs_unmap_page(mirr_page);
- /* Construct the mft mirror run list by hand. */
+ /* Construct the mft mirror runlist by hand. */
rl2[0].vcn = 0;
rl2[0].lcn = vol->mftmirr_lcn;
rl2[0].length = (vol->mftmirr_size * vol->mft_record_size +
@@ -996,12 +996,12 @@ static BOOL check_mft_mirror(ntfs_volume *vol)
rl2[1].length = 0;
/*
* Because we have just read all of the mft mirror, we know we have
- * mapped the full run list for it.
+ * mapped the full runlist for it.
*/
mirr_ni = NTFS_I(vol->mftmirr_ino);
down_read(&mirr_ni->runlist.lock);
rl = mirr_ni->runlist.rl;
- /* Compare the two run lists. They must be identical. */
+ /* Compare the two runlists. They must be identical. */
i = 0;
do {
if (rl2[i].vcn != rl[i].vcn || rl2[i].lcn != rl[i].lcn ||
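The comparison loop cut off above walks both runlists element by element until the terminating zero-length entry. In userspace the same check could be sketched as follows; the two-element layout mirrors the rl2[] built by hand in the hunk, and the names and values here are illustrative:

#include <stdio.h>

typedef long long VCN;
typedef long long LCN;

struct rl_entry { VCN vcn; LCN lcn; VCN length; };

/* Return 1 if both runlists describe the same runs, 0 otherwise. */
static int runlists_equal(const struct rl_entry *a, const struct rl_entry *b)
{
	int i = 0;

	do {
		if (a[i].vcn != b[i].vcn || a[i].lcn != b[i].lcn ||
				a[i].length != b[i].length)
			return 0;
	} while (a[i++].length);	/* the zero-length terminator ends both */
	return 1;
}

int main(void)
{
	/* Two-element runlist built "by hand": one run, then the terminator. */
	struct rl_entry by_hand[] = { { 0, 8192, 4 }, { 4, -1, 0 } };
	struct rl_entry mapped[]  = { { 0, 8192, 4 }, { 4, -1, 0 } };

	printf("identical: %d\n", runlists_equal(by_hand, mapped));	/* 1 */
	return 0;
}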
@@ -60,7 +60,7 @@ typedef struct { /* In memory vcn to lcn mapping structure element. */
/**
* runlist - in memory vcn to lcn mapping array including a read/write lock
- * @rl: pointer to an array of run list elements
+ * @rl: pointer to an array of runlist elements
* @lock: read/write spinlock for serializing access to @rl
*
*/
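As a rough model of the vcn to lcn translation this array exists for: walk the elements until the run containing the vcn is found and add the offset into the run to the run's starting lcn, with negative lcns standing for a hole or an absent mapping. A minimal sketch only; the driver's own lookup helper, special lcn values and locking are not reproduced here:

#include <stdio.h>

typedef long long VCN;
typedef long long LCN;

struct rl_entry { VCN vcn; LCN lcn; VCN length; };	/* length == 0 terminates */

/* Translate @vcn using @rl; returns a negative "special" lcn when not found. */
static LCN lookup_lcn(const struct rl_entry *rl, VCN vcn)
{
	int i;

	for (i = 0; rl[i].length; i++) {
		if (vcn >= rl[i].vcn && vcn < rl[i].vcn + rl[i].length) {
			if (rl[i].lcn < 0)
				return rl[i].lcn;	/* hole / not mapped */
			return rl[i].lcn + (vcn - rl[i].vcn);
		}
	}
	return -1;	/* illustrative "not found" code */
}

int main(void)
{
	/* 4 clusters at lcn 1000, a 2 cluster hole, 3 clusters at lcn 5000. */
	struct rl_entry rl[] = {
		{ 0, 1000, 4 }, { 4, -1, 2 }, { 6, 5000, 3 }, { 9, -1, 0 },
	};

	printf("vcn 2 -> lcn %lld\n", lookup_lcn(rl, 2));	/* 1002 */
	printf("vcn 5 -> lcn %lld\n", lookup_lcn(rl, 5));	/* -1 (hole) */
	printf("vcn 7 -> lcn %lld\n", lookup_lcn(rl, 7));	/* 5001 */
	return 0;
}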