Commit c49c3111 authored by Richard Knutsson, committed by Linus Torvalds

[PATCH] fs/ntfs: Conversion to generic boolean

Conversion of booleans to: generic-boolean.patch (2006-08-23)
Signed-off-by: Richard Knutsson <ricknu-0@student.ltu.se>
Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6e218287
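For readers skimming the diff below, the change is mechanical: the driver's private Boolean type is dropped in favour of the generic kernel one. A minimal before/after sketch follows; the exact shape of the old typedef is an assumption for illustration (the "after" side is quoted from the hunks below), with bool/true/false coming from <linux/types.h>:

        /* Before: driver-private Boolean (assumed shape of the old definition). */
        typedef enum {
                FALSE = 0,
                TRUE = 1
        } BOOL;

        BOOL is_retry = FALSE;
        if (!is_retry && lcn == LCN_RL_NOT_MAPPED)
                is_retry = TRUE;        /* first failure: map the runlist and retry */

        /* After: generic kernel boolean from <linux/types.h>. */
        bool is_retry = false;
        if (!is_retry && lcn == LCN_RL_NOT_MAPPED)
                is_retry = true;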
@@ -254,7 +254,7 @@ static int ntfs_read_block(struct page *page)
                 bh->b_bdev = vol->sb->s_bdev;
                 /* Is the block within the allowed limits? */
                 if (iblock < lblock) {
-                        BOOL is_retry = FALSE;
+                        bool is_retry = false;
                         /* Convert iblock into corresponding vcn and offset. */
                         vcn = (VCN)iblock << blocksize_bits >>
@@ -292,7 +292,7 @@ static int ntfs_read_block(struct page *page)
                                 goto handle_hole;
                         /* If first try and runlist unmapped, map and retry. */
                         if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-                                is_retry = TRUE;
+                                is_retry = true;
                                 /*
                                  * Attempt to map runlist, dropping lock for
                                  * the duration.
@@ -558,7 +558,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
         unsigned long flags;
         unsigned int blocksize, vcn_ofs;
         int err;
-        BOOL need_end_writeback;
+        bool need_end_writeback;
         unsigned char blocksize_bits;

         vi = page->mapping->host;
@@ -626,7 +626,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
         rl = NULL;
         err = 0;
         do {
-                BOOL is_retry = FALSE;
+                bool is_retry = false;

                 if (unlikely(block >= dblock)) {
                         /*
@@ -768,7 +768,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
                 }
                 /* If first try and runlist unmapped, map and retry. */
                 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-                        is_retry = TRUE;
+                        is_retry = true;
                         /*
                          * Attempt to map runlist, dropping lock for
                          * the duration.
@@ -874,12 +874,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
         set_page_writeback(page);        /* Keeps try_to_free_buffers() away. */

         /* Submit the prepared buffers for i/o. */
-        need_end_writeback = TRUE;
+        need_end_writeback = true;
         do {
                 struct buffer_head *next = bh->b_this_page;
                 if (buffer_async_write(bh)) {
                         submit_bh(WRITE, bh);
-                        need_end_writeback = FALSE;
+                        need_end_writeback = false;
                 }
                 bh = next;
         } while (bh != head);
@@ -932,7 +932,7 @@ static int ntfs_write_mst_block(struct page *page,
         runlist_element *rl;
         int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
         unsigned bh_size, rec_size_bits;
-        BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+        bool sync, is_mft, page_is_dirty, rec_is_dirty;
         unsigned char bh_size_bits;

         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
@@ -975,10 +975,10 @@ static int ntfs_write_mst_block(struct page *page,
         rl = NULL;
         err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
-        page_is_dirty = rec_is_dirty = FALSE;
+        page_is_dirty = rec_is_dirty = false;
         rec_start_bh = NULL;
         do {
-                BOOL is_retry = FALSE;
+                bool is_retry = false;

                 if (likely(block < rec_block)) {
                         if (unlikely(block >= dblock)) {
@@ -1009,10 +1009,10 @@ static int ntfs_write_mst_block(struct page *page,
                         }
                         if (!buffer_dirty(bh)) {
                                 /* Clean records are not written out. */
-                                rec_is_dirty = FALSE;
+                                rec_is_dirty = false;
                                 continue;
                         }
-                        rec_is_dirty = TRUE;
+                        rec_is_dirty = true;
                         rec_start_bh = bh;
                 }
                 /* Need to map the buffer if it is not mapped already. */
@@ -1053,7 +1053,7 @@ static int ntfs_write_mst_block(struct page *page,
                                  */
                                 if (!is_mft && !is_retry &&
                                                 lcn == LCN_RL_NOT_MAPPED) {
-                                        is_retry = TRUE;
+                                        is_retry = true;
                                         /*
                                          * Attempt to map runlist, dropping
                                          * lock for the duration.
@@ -1063,7 +1063,7 @@ static int ntfs_write_mst_block(struct page *page,
                                         if (likely(!err2))
                                                 goto lock_retry_remap;
                                         if (err2 == -ENOMEM)
-                                                page_is_dirty = TRUE;
+                                                page_is_dirty = true;
                                         lcn = err2;
                                 } else {
                                         err2 = -EIO;
@@ -1145,7 +1145,7 @@ static int ntfs_write_mst_block(struct page *page,
                          * means we need to redirty the page before
                          * returning.
                          */
-                        page_is_dirty = TRUE;
+                        page_is_dirty = true;
                         /*
                          * Remove the buffers in this mft record from
                          * the list of buffers to write.
...
@@ -80,7 +80,7 @@ static inline void ntfs_unmap_page(struct page *page)
  *
  * The unlocked and uptodate page is returned on success or an encoded error
  * on failure.  Caller has to test for error using the IS_ERR() macro on the
- * return value.  If that evaluates to TRUE, the negative error code can be
+ * return value.  If that evaluates to 'true', the negative error code can be
  * obtained using PTR_ERR() on the return value of ntfs_map_page().
  */
 static inline struct page *ntfs_map_page(struct address_space *mapping,
...
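The ntfs_map_page() comment above describes the usual kernel ERR_PTR convention. As a hedged sketch of the calling pattern it implies (caller code assumed for illustration, not part of this commit):

        struct page *page;

        page = ntfs_map_page(mapping, index);
        if (IS_ERR(page))
                return PTR_ERR(page);   /* IS_ERR() true: pointer encodes a negative errno */
        /* ... use the mapped, unlocked, uptodate page ... */
        ntfs_unmap_page(page);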
@@ -67,7 +67,7 @@
  * the attribute has zero allocated size, i.e. there simply is no runlist.
  *
  * WARNING: If @ctx is supplied, regardless of whether success or failure is
- *          returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
+ *          returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
  *          is no longer valid, i.e. you need to either call
  *          ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
  *          In that case PTR_ERR(@ctx->mrec) will give you the error code for
@@ -90,7 +90,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
         runlist_element *rl;
         struct page *put_this_page = NULL;
         int err = 0;
-        BOOL ctx_is_temporary, ctx_needs_reset;
+        bool ctx_is_temporary, ctx_needs_reset;
         ntfs_attr_search_ctx old_ctx = { NULL, };

         ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
@@ -100,7 +100,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
         else
                 base_ni = ni->ext.base_ntfs_ino;
         if (!ctx) {
-                ctx_is_temporary = ctx_needs_reset = TRUE;
+                ctx_is_temporary = ctx_needs_reset = true;
                 m = map_mft_record(base_ni);
                 if (IS_ERR(m))
                         return PTR_ERR(m);
@@ -115,7 +115,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
                 BUG_ON(IS_ERR(ctx->mrec));
                 a = ctx->attr;
                 BUG_ON(!a->non_resident);
-                ctx_is_temporary = FALSE;
+                ctx_is_temporary = false;
                 end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
                 read_lock_irqsave(&ni->size_lock, flags);
                 allocated_size_vcn = ni->allocated_size >>
@@ -136,7 +136,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
                                 ni->name, ni->name_len) &&
                                 sle64_to_cpu(a->data.non_resident.lowest_vcn)
                                 <= vcn && end_vcn >= vcn))
-                        ctx_needs_reset = FALSE;
+                        ctx_needs_reset = false;
                 else {
                         /* Save the old search context. */
                         old_ctx = *ctx;
@@ -158,7 +158,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
                          * needed attribute extent.
                          */
                         ntfs_attr_reinit_search_ctx(ctx);
-                        ctx_needs_reset = TRUE;
+                        ctx_needs_reset = true;
                 }
         }
         if (ctx_needs_reset) {
@@ -336,16 +336,16 @@ int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
  *      LCN_EIO         Critical error (runlist/file is corrupt, i/o error, etc).
  *
  * Locking: - The runlist must be locked on entry and is left locked on return.
- *          - If @write_locked is FALSE, i.e. the runlist is locked for reading,
+ *          - If @write_locked is 'false', i.e. the runlist is locked for reading,
  *            the lock may be dropped inside the function so you cannot rely on
  *            the runlist still being the same when this function returns.
  */
 LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
-                const BOOL write_locked)
+                const bool write_locked)
 {
         LCN lcn;
         unsigned long flags;
-        BOOL is_retry = FALSE;
+        bool is_retry = false;

         ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
                         ni->mft_no, (unsigned long long)vcn,
@@ -390,7 +390,7 @@ LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
                         down_read(&ni->runlist.lock);
                 }
                 if (likely(!err)) {
-                        is_retry = TRUE;
+                        is_retry = true;
                         goto retry_remap;
                 }
                 if (err == -ENOENT)
@@ -449,7 +449,7 @@ LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
  *      -EIO    - Critical error (runlist/file is corrupt, i/o error, etc).
  *
  * WARNING: If @ctx is supplied, regardless of whether success or failure is
- *          returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
+ *          returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
  *          is no longer valid, i.e. you need to either call
  *          ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
  *          In that case PTR_ERR(@ctx->mrec) will give you the error code for
@@ -469,7 +469,7 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
         unsigned long flags;
         runlist_element *rl;
         int err = 0;
-        BOOL is_retry = FALSE;
+        bool is_retry = false;

         ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
                         ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
@@ -518,7 +518,7 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
                  */
                 err = ntfs_map_runlist_nolock(ni, vcn, ctx);
                 if (likely(!err)) {
-                        is_retry = TRUE;
+                        is_retry = true;
                         goto retry_remap;
                 }
         }
@@ -558,8 +558,8 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
  * On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
  * undefined and in particular do not rely on it not changing.
  *
- * If @ctx->is_first is TRUE, the search begins with @ctx->attr itself. If it
- * is FALSE, the search begins after @ctx->attr.
+ * If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
+ * is 'false', the search begins after @ctx->attr.
  *
  * If @ic is IGNORE_CASE, the @name comparisson is not case sensitive and
  * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
@@ -599,11 +599,11 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
         /*
          * Iterate over attributes in mft record starting at @ctx->attr, or the
-         * attribute following that, if @ctx->is_first is TRUE.
+         * attribute following that, if @ctx->is_first is 'true'.
          */
         if (ctx->is_first) {
                 a = ctx->attr;
-                ctx->is_first = FALSE;
+                ctx->is_first = false;
         } else
                 a = (ATTR_RECORD*)((u8*)ctx->attr +
                                 le32_to_cpu(ctx->attr->length));
@@ -890,11 +890,11 @@ static int ntfs_external_attr_find(const ATTR_TYPE type,
         ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
         /*
          * Iterate over entries in attribute list starting at @ctx->al_entry,
-         * or the entry following that, if @ctx->is_first is TRUE.
+         * or the entry following that, if @ctx->is_first is 'true'.
          */
         if (ctx->is_first) {
                 al_entry = ctx->al_entry;
-                ctx->is_first = FALSE;
+                ctx->is_first = false;
         } else
                 al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
                                 le16_to_cpu(ctx->al_entry->length));
@@ -1127,7 +1127,7 @@ static int ntfs_external_attr_find(const ATTR_TYPE type,
                 ctx->mrec = ctx->base_mrec;
                 ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
                                 le16_to_cpu(ctx->mrec->attrs_offset));
-                ctx->is_first = TRUE;
+                ctx->is_first = true;
                 ctx->ntfs_ino = base_ni;
                 ctx->base_ntfs_ino = NULL;
                 ctx->base_mrec = NULL;
@@ -1224,7 +1224,7 @@ static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
                 /* Sanity checks are performed elsewhere. */
                 .attr = (ATTR_RECORD*)((u8*)mrec +
                                 le16_to_cpu(mrec->attrs_offset)),
-                .is_first = TRUE,
+                .is_first = true,
                 .ntfs_ino = ni,
         };
 }
@@ -1243,7 +1243,7 @@ void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
 {
         if (likely(!ctx->base_ntfs_ino)) {
                 /* No attribute list. */
-                ctx->is_first = TRUE;
+                ctx->is_first = true;
                 /* Sanity checks are performed elsewhere. */
                 ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
                                 le16_to_cpu(ctx->mrec->attrs_offset));
@@ -1585,7 +1585,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
                 return -ENOMEM;
         /* Start by allocating clusters to hold the attribute value. */
         rl = ntfs_cluster_alloc(vol, 0, new_size >>
-                        vol->cluster_size_bits, -1, DATA_ZONE, TRUE);
+                        vol->cluster_size_bits, -1, DATA_ZONE, true);
         if (IS_ERR(rl)) {
                 err = PTR_ERR(rl);
                 ntfs_debug("Failed to allocate cluster%s, error code "
@@ -1919,7 +1919,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
         unsigned long flags;
         int err, mp_size;
         u32 attr_len = 0; /* Silence stupid gcc warning. */
-        BOOL mp_rebuilt;
+        bool mp_rebuilt;

 #ifdef NTFS_DEBUG
         read_lock_irqsave(&ni->size_lock, flags);
@@ -2222,7 +2222,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
         rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
                         (new_alloc_size - allocated_size) >>
                         vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
-                        rl->lcn + rl->length : -1, DATA_ZONE, TRUE);
+                        rl->lcn + rl->length : -1, DATA_ZONE, true);
         if (IS_ERR(rl2)) {
                 err = PTR_ERR(rl2);
                 if (start < 0 || start >= allocated_size)
@@ -2265,7 +2265,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
         BUG_ON(!rl2);
         BUG_ON(!rl2->length);
         BUG_ON(rl2->lcn < LCN_HOLE);
-        mp_rebuilt = FALSE;
+        mp_rebuilt = false;
         /* Get the size for the new mapping pairs array for this extent. */
         mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
         if (unlikely(mp_size <= 0)) {
@@ -2300,7 +2300,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
                 err = -EOPNOTSUPP;
                 goto undo_alloc;
         }
-        mp_rebuilt = TRUE;
+        mp_rebuilt = true;
         /* Generate the mapping pairs array directly into the attr record. */
         err = ntfs_mapping_pairs_build(vol, (u8*)a +
                         le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
...
@@ -40,10 +40,10 @@
  * Structure must be initialized to zero before the first call to one of the
  * attribute search functions. Initialize @mrec to point to the mft record to
  * search, and @attr to point to the first attribute within @mrec (not necessary
- * if calling the _first() functions), and set @is_first to TRUE (not necessary
+ * if calling the _first() functions), and set @is_first to 'true' (not necessary
  * if calling the _first() functions).
  *
- * If @is_first is TRUE, the search begins with @attr. If @is_first is FALSE,
+ * If @is_first is 'true', the search begins with @attr. If @is_first is 'false',
  * the search begins after @attr. This is so that, after the first call to one
  * of the search attribute functions, we can call the function again, without
  * any modification of the search context, to automagically get the next
@@ -52,7 +52,7 @@
 typedef struct {
         MFT_RECORD *mrec;
         ATTR_RECORD *attr;
-        BOOL is_first;
+        bool is_first;
         ntfs_inode *ntfs_ino;
         ATTR_LIST_ENTRY *al_entry;
         ntfs_inode *base_ntfs_ino;
@@ -65,7 +65,7 @@ extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn,
 extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);

 extern LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
-                const BOOL write_locked);
+                const bool write_locked);

 extern runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni,
                 const VCN vcn, ntfs_attr_search_ctx *ctx);
...
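The @is_first field documented above is what lets repeated lookups resume where the previous one stopped. A hedged sketch of that iteration protocol (ntfs_attr_get_search_ctx(), ntfs_attr_lookup() and ntfs_attr_put_search_ctx() exist in this driver, but this particular invocation is illustrative, not taken from this commit):

        ntfs_attr_search_ctx *ctx = ntfs_attr_get_search_ctx(ni, mrec);
        if (unlikely(!ctx))
                return -ENOMEM;         /* ctx->is_first starts out true */
        /*
         * Each successful lookup clears is_first, so the next call resumes
         * after the attribute found by the previous one.
         */
        while (!ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
                        ctx)) {
                /* ctx->attr points at the next matching attribute record. */
        }
        ntfs_attr_put_search_ctx(ctx);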
@@ -34,18 +34,18 @@
  * @start_bit:          first bit to set
  * @count:              number of bits to set
  * @value:              value to set the bits to (i.e. 0 or 1)
- * @is_rollback:        if TRUE this is a rollback operation
+ * @is_rollback:        if 'true' this is a rollback operation
  *
  * Set @count bits starting at bit @start_bit in the bitmap described by the
  * vfs inode @vi to @value, where @value is either 0 or 1.
  *
- * @is_rollback should always be FALSE, it is for internal use to rollback
+ * @is_rollback should always be 'false', it is for internal use to rollback
  * errors. You probably want to use ntfs_bitmap_set_bits_in_run() instead.
  *
  * Return 0 on success and -errno on error.
  */
 int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
-                const s64 count, const u8 value, const BOOL is_rollback)
+                const s64 count, const u8 value, const bool is_rollback)
 {
         s64 cnt = count;
         pgoff_t index, end_index;
@@ -172,7 +172,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
                 return PTR_ERR(page);
         if (count != cnt)
                 pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
-                                value ? 0 : 1, TRUE);
+                                value ? 0 : 1, true);
         else
                 pos = 0;
         if (!pos) {
...
@@ -30,7 +30,7 @@
 #include "types.h"

 extern int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
-                const s64 count, const u8 value, const BOOL is_rollback);
+                const s64 count, const u8 value, const bool is_rollback);

 /**
  * ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
@@ -48,7 +48,7 @@ static inline int ntfs_bitmap_set_bits_in_run(struct inode *vi,
                 const s64 start_bit, const s64 count, const u8 value)
 {
         return __ntfs_bitmap_set_bits_in_run(vi, start_bit, count, value,
-                        FALSE);
+                        false);
 }

 /**
...
@@ -26,7 +26,7 @@
 #include "types.h"
 #include "volume.h"

-static inline BOOL ntfs_is_collation_rule_supported(COLLATION_RULE cr) {
+static inline bool ntfs_is_collation_rule_supported(COLLATION_RULE cr) {
         int i;

         /*
@@ -35,12 +35,12 @@ static inline BOOL ntfs_is_collation_rule_supported(COLLATION_RULE cr) {
          * now.
          */
         if (unlikely(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG))
-                return FALSE;
+                return false;
         i = le32_to_cpu(cr);
         if (likely(((i >= 0) && (i <= 0x02)) ||
                         ((i >= 0x10) && (i <= 0x13))))
-                return TRUE;
-        return FALSE;
+                return true;
+        return false;
 }

 extern int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
...
@@ -600,7 +600,7 @@ int ntfs_read_compressed_block(struct page *page)
         rl = NULL;
         for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
                         vcn++) {
-                BOOL is_retry = FALSE;
+                bool is_retry = false;

                 if (!rl) {
 lock_retry_remap:
@@ -626,7 +626,7 @@ int ntfs_read_compressed_block(struct page *page)
                         break;
                 if (is_retry || lcn != LCN_RL_NOT_MAPPED)
                         goto rl_err;
-                is_retry = TRUE;
+                is_retry = true;
                 /*
                  * Attempt to map runlist, dropping lock for the
                  * duration.
...
@@ -509,7 +509,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
         u32 attr_rec_len = 0;
         unsigned blocksize, u;
         int err, mp_size;
-        BOOL rl_write_locked, was_hole, is_retry;
+        bool rl_write_locked, was_hole, is_retry;
         unsigned char blocksize_bits;
         struct {
                 u8 runlist_merged:1;
@@ -543,13 +543,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                         return -ENOMEM;
                 }
         } while (++u < nr_pages);
-        rl_write_locked = FALSE;
+        rl_write_locked = false;
         rl = NULL;
         err = 0;
         vcn = lcn = -1;
         vcn_len = 0;
         lcn_block = -1;
-        was_hole = FALSE;
+        was_hole = false;
         cpos = pos >> vol->cluster_size_bits;
         end = pos + bytes;
         cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -760,7 +760,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                         }
                         continue;
                 }
-                is_retry = FALSE;
+                is_retry = false;
                 if (!rl) {
                         down_read(&ni->runlist.lock);
 retry_remap:
@@ -776,7 +776,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                                  * Successful remap, setup the map cache and
                                  * use that to deal with the buffer.
                                  */
-                                was_hole = FALSE;
+                                was_hole = false;
                                 vcn = bh_cpos;
                                 vcn_len = rl[1].vcn - vcn;
                                 lcn_block = lcn << (vol->cluster_size_bits -
@@ -792,7 +792,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                                 if (likely(vcn + vcn_len >= cend)) {
                                         if (rl_write_locked) {
                                                 up_write(&ni->runlist.lock);
-                                                rl_write_locked = FALSE;
+                                                rl_write_locked = false;
                                         } else
                                                 up_read(&ni->runlist.lock);
                                         rl = NULL;
@@ -818,13 +818,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                                          */
                                         up_read(&ni->runlist.lock);
                                         down_write(&ni->runlist.lock);
-                                        rl_write_locked = TRUE;
+                                        rl_write_locked = true;
                                         goto retry_remap;
                                 }
                                 err = ntfs_map_runlist_nolock(ni, bh_cpos,
                                                 NULL);
                                 if (likely(!err)) {
-                                        is_retry = TRUE;
+                                        is_retry = true;
                                         goto retry_remap;
                                 }
                                 /*
@@ -903,7 +903,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                         if (!rl_write_locked) {
                                 up_read(&ni->runlist.lock);
                                 down_write(&ni->runlist.lock);
-                                rl_write_locked = TRUE;
+                                rl_write_locked = true;
                                 goto retry_remap;
                         }
                         /* Find the previous last allocated cluster. */
@@ -917,7 +917,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                                 }
                         }
                         rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
-                                        FALSE);
+                                        false);
                         if (IS_ERR(rl2)) {
                                 err = PTR_ERR(rl2);
                                 ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -1093,7 +1093,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                         status.mft_attr_mapped = 0;
                         status.mp_rebuilt = 0;
                         /* Setup the map cache and use that to deal with the buffer. */
-                        was_hole = TRUE;
+                        was_hole = true;
                         vcn = bh_cpos;
                         vcn_len = 1;
                         lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1105,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                          */
                         if (likely(vcn + vcn_len >= cend)) {
                                 up_write(&ni->runlist.lock);
-                                rl_write_locked = FALSE;
+                                rl_write_locked = false;
                                 rl = NULL;
                         }
                         goto map_buffer_cached;
@@ -1117,7 +1117,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
         if (likely(!err)) {
                 if (unlikely(rl_write_locked)) {
                         up_write(&ni->runlist.lock);
-                        rl_write_locked = FALSE;
+                        rl_write_locked = false;
                 } else if (unlikely(rl))
                         up_read(&ni->runlist.lock);
                 rl = NULL;
@@ -1528,19 +1528,19 @@ static inline int ntfs_commit_pages_after_non_resident_write(
         do {
                 s64 bh_pos;
                 struct page *page;
-                BOOL partial;
+                bool partial;

                 page = pages[u];
                 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
                 bh = head = page_buffers(page);
-                partial = FALSE;
+                partial = false;
                 do {
                         s64 bh_end;

                         bh_end = bh_pos + blocksize;
                         if (bh_end <= pos || bh_pos >= end) {
                                 if (!buffer_uptodate(bh))
-                                        partial = TRUE;
+                                        partial = true;
                         } else {
                                 set_buffer_uptodate(bh);
                                 mark_buffer_dirty(bh);
@@ -1997,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                  */
                 down_read(&ni->runlist.lock);
                 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
-                                vol->cluster_size_bits, FALSE);
+                                vol->cluster_size_bits, false);
                 up_read(&ni->runlist.lock);
                 if (unlikely(lcn < LCN_HOLE)) {
                         status = -EIO;
...
@@ -204,7 +204,7 @@ int ntfs_index_lookup(const void *key, const int key_len,
         if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
                         &ie->key, key_len)) {
 ir_done:
-                ictx->is_in_root = TRUE;
+                ictx->is_in_root = true;
                 ictx->ir = ir;
                 ictx->actx = actx;
                 ictx->base_ni = base_ni;
@@ -374,7 +374,7 @@ int ntfs_index_lookup(const void *key, const int key_len,
         if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
                         &ie->key, key_len)) {
 ia_done:
-                ictx->is_in_root = FALSE;
+                ictx->is_in_root = false;
                 ictx->actx = NULL;
                 ictx->base_ni = NULL;
                 ictx->ia = ia;
...
@@ -37,12 +37,12 @@
  * @entry:      index entry (points into @ir or @ia)
  * @data:       index entry data (points into @entry)
  * @data_len:   length in bytes of @data
- * @is_in_root: TRUE if @entry is in @ir and FALSE if it is in @ia
+ * @is_in_root: 'true' if @entry is in @ir and 'false' if it is in @ia
  * @ir:         index root if @is_in_root and NULL otherwise
  * @actx:       attribute search context if @is_in_root and NULL otherwise
  * @base_ni:    base inode if @is_in_root and NULL otherwise
- * @ia:         index block if @is_in_root is FALSE and NULL otherwise
- * @page:       page if @is_in_root is FALSE and NULL otherwise
+ * @ia:         index block if @is_in_root is 'false' and NULL otherwise
+ * @page:       page if @is_in_root is 'false' and NULL otherwise
  *
  * @idx_ni is the index inode this context belongs to.
  *
@@ -50,11 +50,11 @@
  * are the index entry data and its length in bytes, respectively. @data
  * simply points into @entry. This is probably what the user is interested in.
  *
- * If @is_in_root is TRUE, @entry is in the index root attribute @ir described
+ * If @is_in_root is 'true', @entry is in the index root attribute @ir described
  * by the attribute search context @actx and the base inode @base_ni. @ia and
  * @page are NULL in this case.
  *
- * If @is_in_root is FALSE, @entry is in the index allocation attribute and @ia
+ * If @is_in_root is 'false', @entry is in the index allocation attribute and @ia
  * and @page point to the index allocation block and the mapped, locked page it
  * is in, respectively. @ir, @actx and @base_ni are NULL in this case.
  *
@@ -77,7 +77,7 @@ typedef struct {
         INDEX_ENTRY *entry;
         void *data;
         u16 data_len;
-        BOOL is_in_root;
+        bool is_in_root;
         INDEX_ROOT *ir;
         ntfs_attr_search_ctx *actx;
         ntfs_inode *base_ni;
...
@@ -2301,7 +2301,7 @@ void ntfs_clear_big_inode(struct inode *vi)
         }
 #ifdef NTFS_RW
         if (NInoDirty(ni)) {
-                BOOL was_bad = (is_bad_inode(vi));
+                bool was_bad = (is_bad_inode(vi));

                 /* Committing the inode also commits all extent inodes. */
                 ntfs_commit_inode(vi);
@@ -3015,7 +3015,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
         MFT_RECORD *m;
         STANDARD_INFORMATION *si;
         int err = 0;
-        BOOL modified = FALSE;
+        bool modified = false;

         ntfs_debug("Entering for %sinode 0x%lx.", NInoAttr(ni) ? "attr " : "",
                         vi->i_ino);
@@ -3057,7 +3057,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
                                 sle64_to_cpu(si->last_data_change_time),
                                 (long long)sle64_to_cpu(nt));
                 si->last_data_change_time = nt;
-                modified = TRUE;
+                modified = true;
         }
         nt = utc2ntfs(vi->i_ctime);
         if (si->last_mft_change_time != nt) {
@@ -3066,7 +3066,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
                                 sle64_to_cpu(si->last_mft_change_time),
                                 (long long)sle64_to_cpu(nt));
                 si->last_mft_change_time = nt;
-                modified = TRUE;
+                modified = true;
         }
         nt = utc2ntfs(vi->i_atime);
         if (si->last_access_time != nt) {
@@ -3075,7 +3075,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
                                 (long long)sle64_to_cpu(si->last_access_time),
                                 (long long)sle64_to_cpu(nt));
                 si->last_access_time = nt;
-                modified = TRUE;
+                modified = true;
         }
         /*
          * If we just modified the standard information attribute we need to
...
@@ -142,13 +142,13 @@ typedef le32 NTFS_RECORD_TYPE;
  * operator! (-8
  */

-static inline BOOL __ntfs_is_magic(le32 x, NTFS_RECORD_TYPE r)
+static inline bool __ntfs_is_magic(le32 x, NTFS_RECORD_TYPE r)
 {
         return (x == r);
 }
 #define ntfs_is_magic(x, m)     __ntfs_is_magic(x, magic_##m)

-static inline BOOL __ntfs_is_magicp(le32 *p, NTFS_RECORD_TYPE r)
+static inline bool __ntfs_is_magicp(le32 *p, NTFS_RECORD_TYPE r)
 {
         return (*p == r);
 }
@@ -323,7 +323,7 @@ typedef le64 leMFT_REF;
 #define MREF_LE(x)      ((unsigned long)(le64_to_cpu(x) & MFT_REF_MASK_CPU))
 #define MSEQNO_LE(x)    ((u16)((le64_to_cpu(x) >> 48) & 0xffff))

-#define IS_ERR_MREF(x)  (((x) & 0x0000800000000000ULL) ? 1 : 0)
+#define IS_ERR_MREF(x)  (((x) & 0x0000800000000000ULL) ? true : false)
 #define ERR_MREF(x)     ((u64)((s64)(x)))
 #define MREF_ERR(x)     ((int)((s64)(x)))
...
@@ -76,7 +76,7 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
  * @count:      number of clusters to allocate
  * @start_lcn:  starting lcn at which to allocate the clusters (or -1 if none)
  * @zone:       zone from which to allocate the clusters
- * @is_extension:       if TRUE, this is an attribute extension
+ * @is_extension:       if 'true', this is an attribute extension
  *
  * Allocate @count clusters preferably starting at cluster @start_lcn or at the
  * current allocator position if @start_lcn is -1, on the mounted ntfs volume
@@ -87,11 +87,11 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
  * @start_vcn specifies the vcn of the first allocated cluster. This makes
  * merging the resulting runlist with the old runlist easier.
  *
- * If @is_extension is TRUE, the caller is allocating clusters to extend an
- * attribute and if it is FALSE, the caller is allocating clusters to fill a
+ * If @is_extension is 'true', the caller is allocating clusters to extend an
+ * attribute and if it is 'false', the caller is allocating clusters to fill a
  * hole in an attribute. Practically the difference is that if @is_extension
- * is TRUE the returned runlist will be terminated with LCN_ENOENT and if
- * @is_extension is FALSE the runlist will be terminated with
+ * is 'true' the returned runlist will be terminated with LCN_ENOENT and if
+ * @is_extension is 'false' the runlist will be terminated with
  * LCN_RL_NOT_MAPPED.
  *
  * You need to check the return value with IS_ERR(). If this is false, the
@@ -146,7 +146,7 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
 runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
                 const s64 count, const LCN start_lcn,
                 const NTFS_CLUSTER_ALLOCATION_ZONES zone,
-                const BOOL is_extension)
+                const bool is_extension)
 {
         LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn;
         LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size;
@@ -818,7 +818,7 @@ switch_to_data1_zone: search_zone = 2;
  * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
  * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
  *
- * @is_rollback should always be FALSE, it is for internal use to rollback
+ * @is_rollback should always be 'false', it is for internal use to rollback
  * errors. You probably want to use ntfs_cluster_free() instead.
  *
  * Note, __ntfs_cluster_free() does not modify the runlist, so you have to
@@ -828,7 +828,7 @@ switch_to_data1_zone: search_zone = 2;
  * success and -errno on error.
  *
  * WARNING: If @ctx is supplied, regardless of whether success or failure is
- *          returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
+ *          returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
  *          is no longer valid, i.e. you need to either call
  *          ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
  *          In that case PTR_ERR(@ctx->mrec) will give you the error code for
@@ -847,7 +847,7 @@ switch_to_data1_zone: search_zone = 2;
  * and it will be left mapped on return.
  */
 s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,
-                ntfs_attr_search_ctx *ctx, const BOOL is_rollback)
+                ntfs_attr_search_ctx *ctx, const bool is_rollback)
 {
         s64 delta, to_free, total_freed, real_freed;
         ntfs_volume *vol;
@@ -999,7 +999,7 @@ s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,
          * If rollback fails, set the volume errors flag, emit an error
          * message, and return the error code.
          */
-        delta = __ntfs_cluster_free(ni, start_vcn, total_freed, ctx, TRUE);
+        delta = __ntfs_cluster_free(ni, start_vcn, total_freed, ctx, true);
         if (delta < 0) {
                 ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving "
                                 "inconsistent metadata! Unmount and run "
...
@@ -43,10 +43,10 @@ typedef enum {
 extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol,
                 const VCN start_vcn, const s64 count, const LCN start_lcn,
                 const NTFS_CLUSTER_ALLOCATION_ZONES zone,
-                const BOOL is_extension);
+                const bool is_extension);

 extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
-                s64 count, ntfs_attr_search_ctx *ctx, const BOOL is_rollback);
+                s64 count, ntfs_attr_search_ctx *ctx, const bool is_rollback);

 /**
  * ntfs_cluster_free - free clusters on an ntfs volume
@@ -86,7 +86,7 @@ extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
  * success and -errno on error.
  *
  * WARNING: If @ctx is supplied, regardless of whether success or failure is
- *          returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
+ *          returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
  *          is no longer valid, i.e. you need to either call
  *          ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
  *          In that case PTR_ERR(@ctx->mrec) will give you the error code for
@@ -107,7 +107,7 @@ extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
 static inline s64 ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
                 s64 count, ntfs_attr_search_ctx *ctx)
 {
-        return __ntfs_cluster_free(ni, start_vcn, count, ctx, FALSE);
+        return __ntfs_cluster_free(ni, start_vcn, count, ctx, false);
 }

 extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
...
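As a hedged sketch of the two @is_extension call patterns the lcnalloc documentation above distinguishes (variable names here are illustrative; both call shapes mirror real calls elsewhere in this diff, e.g. in the attrib.c and file.c hunks):

        /* Extending an attribute: returned runlist ends in LCN_ENOENT. */
        rl = ntfs_cluster_alloc(vol, start_vcn, nr_clusters, -1, DATA_ZONE,
                        true);

        /* Filling a hole: returned runlist ends in LCN_RL_NOT_MAPPED. */
        rl = ntfs_cluster_alloc(vol, hole_vcn, 1, prev_lcn, DATA_ZONE, false);
        if (IS_ERR(rl))
                return PTR_ERR(rl);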
[One file's diff is collapsed in this view and not reproduced here.]
@@ -296,13 +296,13 @@ typedef struct {
 /* sizeof() = 160 (0xa0) bytes */
 } __attribute__ ((__packed__)) LOG_CLIENT_RECORD;

-extern BOOL ntfs_check_logfile(struct inode *log_vi,
+extern bool ntfs_check_logfile(struct inode *log_vi,
                 RESTART_PAGE_HEADER **rp);

-extern BOOL ntfs_is_logfile_clean(struct inode *log_vi,
+extern bool ntfs_is_logfile_clean(struct inode *log_vi,
                 const RESTART_PAGE_HEADER *rp);

-extern BOOL ntfs_empty_logfile(struct inode *log_vi);
+extern bool ntfs_empty_logfile(struct inode *log_vi);

 #endif /* NTFS_RW */
...
@@ -251,7 +251,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
         int i;
         unsigned long mft_no = MREF(mref);
         u16 seq_no = MSEQNO(mref);
-        BOOL destroy_ni = FALSE;
+        bool destroy_ni = false;

         ntfs_debug("Mapping extent mft record 0x%lx (base mft record 0x%lx).",
                         mft_no, base_ni->mft_no);
@@ -322,7 +322,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
         if (seq_no && (le16_to_cpu(m->sequence_number) != seq_no)) {
                 ntfs_error(base_ni->vol->sb, "Found stale extent mft "
                                 "reference! Corrupt filesystem. Run chkdsk.");
-                destroy_ni = TRUE;
+                destroy_ni = true;
                 m = ERR_PTR(-EIO);
                 goto unm_err_out;
         }
@@ -335,7 +335,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
         if (unlikely(!tmp)) {
                 ntfs_error(base_ni->vol->sb, "Failed to allocate "
                                 "internal buffer.");
-                destroy_ni = TRUE;
+                destroy_ni = true;
                 m = ERR_PTR(-ENOMEM);
                 goto unm_err_out;
         }
@@ -857,7 +857,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
  * caller is responsible for unlocking the ntfs inode and unpinning the base
  * vfs inode.
  *
- * Return TRUE if the mft record may be written out and FALSE if not.
+ * Return 'true' if the mft record may be written out and 'false' if not.
  *
  * The caller has locked the page and cleared the uptodate flag on it which
  * means that we can safely write out any dirty mft records that do not have
@@ -868,7 +868,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
  * Here is a description of the tests we perform:
  *
  * If the inode is found in icache we know the mft record must be a base mft
- * record. If it is dirty, we do not write it and return FALSE as the vfs
+ * record. If it is dirty, we do not write it and return 'false' as the vfs
  * inode write paths will result in the access times being updated which would
  * cause the base mft record to be redirtied and written out again. (We know
  * the access time update will modify the base mft record because Windows
@@ -877,11 +877,11 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
  *
  * If the inode is in icache and not dirty, we attempt to lock the mft record
  * and if we find the lock was already taken, it is not safe to write the mft
- * record and we return FALSE.
+ * record and we return 'false'.
  *
  * If we manage to obtain the lock we have exclusive access to the mft record,
  * which also allows us safe writeout of the mft record. We then set
- * @locked_ni to the locked ntfs inode and return TRUE.
+ * @locked_ni to the locked ntfs inode and return 'true'.
 *
  * Note we cannot just lock the mft record and sleep while waiting for the lock
  * because this would deadlock due to lock reversal (normally the mft record is
@@ -891,24 +891,24 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
  * If the inode is not in icache we need to perform further checks.
  *
  * If the mft record is not a FILE record or it is a base mft record, we can
- * safely write it and return TRUE.
+ * safely write it and return 'true'.
  *
  * We now know the mft record is an extent mft record. We check if the inode
  * corresponding to its base mft record is in icache and obtain a reference to
- * it if it is. If it is not, we can safely write it and return TRUE.
+ * it if it is. If it is not, we can safely write it and return 'true'.
  *
  * We now have the base inode for the extent mft record. We check if it has an
  * ntfs inode for the extent mft record attached and if not it is safe to write
- * the extent mft record and we return TRUE.
+ * the extent mft record and we return 'true'.
* *
* The ntfs inode for the extent mft record is attached to the base inode so we * The ntfs inode for the extent mft record is attached to the base inode so we
* attempt to lock the extent mft record and if we find the lock was already * attempt to lock the extent mft record and if we find the lock was already
* taken, it is not safe to write the extent mft record and we return FALSE. * taken, it is not safe to write the extent mft record and we return 'false'.
* *
* If we manage to obtain the lock we have exclusive access to the extent mft * If we manage to obtain the lock we have exclusive access to the extent mft
* record, which also allows us safe writeout of the extent mft record. We * record, which also allows us safe writeout of the extent mft record. We
* set the ntfs inode of the extent mft record clean and then set @locked_ni to * set the ntfs inode of the extent mft record clean and then set @locked_ni to
* the now locked ntfs inode and return TRUE. * the now locked ntfs inode and return 'true'.
* *
* Note, the reason for actually writing dirty mft records here and not just * Note, the reason for actually writing dirty mft records here and not just
* relying on the vfs inode dirty code paths is that we can have mft records * relying on the vfs inode dirty code paths is that we can have mft records
...@@ -922,7 +922,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync) ...@@ -922,7 +922,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
* appear if the mft record is reused for a new inode before it got written * appear if the mft record is reused for a new inode before it got written
* out. * out.
*/ */
BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
const MFT_RECORD *m, ntfs_inode **locked_ni) const MFT_RECORD *m, ntfs_inode **locked_ni)
{ {
struct super_block *sb = vol->sb; struct super_block *sb = vol->sb;
...@@ -977,7 +977,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -977,7 +977,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
mft_no); mft_no);
atomic_dec(&ni->count); atomic_dec(&ni->count);
iput(vi); iput(vi);
return FALSE; return false;
} }
ntfs_debug("Inode 0x%lx is not dirty.", mft_no); ntfs_debug("Inode 0x%lx is not dirty.", mft_no);
/* The inode is not dirty, try to take the mft record lock. */ /* The inode is not dirty, try to take the mft record lock. */
...@@ -986,7 +986,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -986,7 +986,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
"not write it.", mft_no); "not write it.", mft_no);
atomic_dec(&ni->count); atomic_dec(&ni->count);
iput(vi); iput(vi);
return FALSE; return false;
} }
ntfs_debug("Managed to lock mft record 0x%lx, write it.", ntfs_debug("Managed to lock mft record 0x%lx, write it.",
mft_no); mft_no);
...@@ -995,7 +995,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -995,7 +995,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
* return the locked ntfs inode. * return the locked ntfs inode.
*/ */
*locked_ni = ni; *locked_ni = ni;
return TRUE; return true;
} }
ntfs_debug("Inode 0x%lx is not in icache.", mft_no); ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
/* The inode is not in icache. */ /* The inode is not in icache. */
...@@ -1003,13 +1003,13 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -1003,13 +1003,13 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
if (!ntfs_is_mft_record(m->magic)) { if (!ntfs_is_mft_record(m->magic)) {
ntfs_debug("Mft record 0x%lx is not a FILE record, write it.", ntfs_debug("Mft record 0x%lx is not a FILE record, write it.",
mft_no); mft_no);
return TRUE; return true;
} }
/* Write the mft record if it is a base inode. */ /* Write the mft record if it is a base inode. */
if (!m->base_mft_record) { if (!m->base_mft_record) {
ntfs_debug("Mft record 0x%lx is a base record, write it.", ntfs_debug("Mft record 0x%lx is a base record, write it.",
mft_no); mft_no);
return TRUE; return true;
} }
/* /*
* This is an extent mft record. Check if the inode corresponding to * This is an extent mft record. Check if the inode corresponding to
...@@ -1033,7 +1033,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -1033,7 +1033,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
*/ */
ntfs_debug("Base inode 0x%lx is not in icache, write the " ntfs_debug("Base inode 0x%lx is not in icache, write the "
"extent record.", na.mft_no); "extent record.", na.mft_no);
return TRUE; return true;
} }
ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no); ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
/* /*
...@@ -1051,7 +1051,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -1051,7 +1051,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
iput(vi); iput(vi);
ntfs_debug("Base inode 0x%lx has no attached extent inodes, " ntfs_debug("Base inode 0x%lx has no attached extent inodes, "
"write the extent record.", na.mft_no); "write the extent record.", na.mft_no);
return TRUE; return true;
} }
/* Iterate over the attached extent inodes. */ /* Iterate over the attached extent inodes. */
extent_nis = ni->ext.extent_ntfs_inos; extent_nis = ni->ext.extent_ntfs_inos;
...@@ -1075,7 +1075,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -1075,7 +1075,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
ntfs_debug("Extent inode 0x%lx is not attached to its base " ntfs_debug("Extent inode 0x%lx is not attached to its base "
"inode 0x%lx, write the extent record.", "inode 0x%lx, write the extent record.",
mft_no, na.mft_no); mft_no, na.mft_no);
return TRUE; return true;
} }
ntfs_debug("Extent inode 0x%lx is attached to its base inode 0x%lx.", ntfs_debug("Extent inode 0x%lx is attached to its base inode 0x%lx.",
mft_no, na.mft_no); mft_no, na.mft_no);
...@@ -1091,7 +1091,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -1091,7 +1091,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
iput(vi); iput(vi);
ntfs_debug("Extent mft record 0x%lx is already locked, do " ntfs_debug("Extent mft record 0x%lx is already locked, do "
"not write it.", mft_no); "not write it.", mft_no);
return FALSE; return false;
} }
ntfs_debug("Managed to lock extent mft record 0x%lx, write it.", ntfs_debug("Managed to lock extent mft record 0x%lx, write it.",
mft_no); mft_no);
...@@ -1103,7 +1103,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, ...@@ -1103,7 +1103,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
* the locked extent ntfs inode. * the locked extent ntfs inode.
*/ */
*locked_ni = eni; *locked_ni = eni;
return TRUE; return true;
} }
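A hedged caller sketch for the decision procedure documented above (loosely modeled on the mst writeback path; the control flow is simplified for illustration and is not the driver's actual code):

	static int try_write_one_mft_record(ntfs_volume *vol,
			unsigned long mft_no, ntfs_inode *ni, MFT_RECORD *m)
	{
		ntfs_inode *locked_ni = NULL;
		int err;

		if (!ntfs_may_write_mft_record(vol, mft_no, m, &locked_ni))
			return -EAGAIN;	/* not safe now, leave it dirty */
		err = write_mft_record(ni, m, 0);
		if (locked_ni) {
			/* Drop the lock and reference taken on our behalf.
			 * (The real writeback path also releases the
			 * associated vfs inode reference.) */
			mutex_unlock(&locked_ni->mrec_lock);
			atomic_dec(&locked_ni->count);
		}
		return err;
	}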
static const char *es = " Leaving inconsistent metadata. Unmount and run " static const char *es = " Leaving inconsistent metadata. Unmount and run "
...@@ -1354,7 +1354,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol) ...@@ -1354,7 +1354,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
ntfs_unmap_page(page); ntfs_unmap_page(page);
/* Allocate a cluster from the DATA_ZONE. */ /* Allocate a cluster from the DATA_ZONE. */
rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE, rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE,
TRUE); true);
if (IS_ERR(rl2)) { if (IS_ERR(rl2)) {
up_write(&mftbmp_ni->runlist.lock); up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to allocate a cluster for " ntfs_error(vol->sb, "Failed to allocate a cluster for "
...@@ -1724,7 +1724,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol) ...@@ -1724,7 +1724,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
ATTR_RECORD *a = NULL; ATTR_RECORD *a = NULL;
int ret, mp_size; int ret, mp_size;
u32 old_alen = 0; u32 old_alen = 0;
BOOL mp_rebuilt = FALSE; bool mp_rebuilt = false;
ntfs_debug("Extending mft data allocation."); ntfs_debug("Extending mft data allocation.");
mft_ni = NTFS_I(vol->mft_ino); mft_ni = NTFS_I(vol->mft_ino);
...@@ -1780,7 +1780,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol) ...@@ -1780,7 +1780,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
old_last_vcn = rl[1].vcn; old_last_vcn = rl[1].vcn;
do { do {
rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE, rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
TRUE); true);
if (likely(!IS_ERR(rl2))) if (likely(!IS_ERR(rl2)))
break; break;
if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) { if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
...@@ -1884,7 +1884,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol) ...@@ -1884,7 +1884,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto undo_alloc; goto undo_alloc;
} }
mp_rebuilt = TRUE; mp_rebuilt = true;
/* Generate the mapping pairs array directly into the attr record. */ /* Generate the mapping pairs array directly into the attr record. */
ret = ntfs_mapping_pairs_build(vol, (u8*)a + ret = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset), le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
...@@ -2255,7 +2255,7 @@ ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode, ...@@ -2255,7 +2255,7 @@ ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
unsigned int ofs; unsigned int ofs;
int err; int err;
le16 seq_no, usn; le16 seq_no, usn;
BOOL record_formatted = FALSE; bool record_formatted = false;
if (base_ni) { if (base_ni) {
ntfs_debug("Entering (allocating an extent mft record for " ntfs_debug("Entering (allocating an extent mft record for "
...@@ -2454,7 +2454,7 @@ ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode, ...@@ -2454,7 +2454,7 @@ ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
mft_ni->initialized_size = new_initialized_size; mft_ni->initialized_size = new_initialized_size;
} }
write_unlock_irqrestore(&mft_ni->size_lock, flags); write_unlock_irqrestore(&mft_ni->size_lock, flags);
record_formatted = TRUE; record_formatted = true;
/* Update the mft data attribute record to reflect the new sizes. */ /* Update the mft data attribute record to reflect the new sizes. */
m = map_mft_record(mft_ni); m = map_mft_record(mft_ni);
if (IS_ERR(m)) { if (IS_ERR(m)) {
......
...@@ -111,7 +111,7 @@ static inline int write_mft_record(ntfs_inode *ni, MFT_RECORD *m, int sync) ...@@ -111,7 +111,7 @@ static inline int write_mft_record(ntfs_inode *ni, MFT_RECORD *m, int sync)
return err; return err;
} }
extern BOOL ntfs_may_write_mft_record(ntfs_volume *vol, extern bool ntfs_may_write_mft_record(ntfs_volume *vol,
const unsigned long mft_no, const MFT_RECORD *m, const unsigned long mft_no, const MFT_RECORD *m,
ntfs_inode **locked_ni); ntfs_inode **locked_ni);
......
...@@ -105,7 +105,7 @@ extern int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size); ...@@ -105,7 +105,7 @@ extern int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size);
extern void post_write_mst_fixup(NTFS_RECORD *b); extern void post_write_mst_fixup(NTFS_RECORD *b);
/* From fs/ntfs/unistr.c */ /* From fs/ntfs/unistr.c */
extern BOOL ntfs_are_names_equal(const ntfschar *s1, size_t s1_len, extern bool ntfs_are_names_equal(const ntfschar *s1, size_t s1_len,
const ntfschar *s2, size_t s2_len, const ntfschar *s2, size_t s2_len,
const IGNORE_CASE_BOOL ic, const IGNORE_CASE_BOOL ic,
const ntfschar *upcase, const u32 upcase_size); const ntfschar *upcase, const u32 upcase_size);
......
...@@ -31,10 +31,10 @@ ...@@ -31,10 +31,10 @@
* ntfs_mark_quotas_out_of_date - mark the quotas out of date on an ntfs volume * ntfs_mark_quotas_out_of_date - mark the quotas out of date on an ntfs volume
* @vol: ntfs volume on which to mark the quotas out of date * @vol: ntfs volume on which to mark the quotas out of date
* *
* Mark the quotas out of date on the ntfs volume @vol and return TRUE on * Mark the quotas out of date on the ntfs volume @vol and return 'true' on
* success and FALSE on error. * success and 'false' on error.
*/ */
BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol) bool ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
{ {
ntfs_index_context *ictx; ntfs_index_context *ictx;
QUOTA_CONTROL_ENTRY *qce; QUOTA_CONTROL_ENTRY *qce;
...@@ -46,7 +46,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol) ...@@ -46,7 +46,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
goto done; goto done;
if (!vol->quota_ino || !vol->quota_q_ino) { if (!vol->quota_ino || !vol->quota_q_ino) {
ntfs_error(vol->sb, "Quota inodes are not open."); ntfs_error(vol->sb, "Quota inodes are not open.");
return FALSE; return false;
} }
mutex_lock(&vol->quota_q_ino->i_mutex); mutex_lock(&vol->quota_q_ino->i_mutex);
ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino)); ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino));
...@@ -106,12 +106,12 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol) ...@@ -106,12 +106,12 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
NVolSetQuotaOutOfDate(vol); NVolSetQuotaOutOfDate(vol);
done: done:
ntfs_debug("Done."); ntfs_debug("Done.");
return TRUE; return true;
err_out: err_out:
if (ictx) if (ictx)
ntfs_index_ctx_put(ictx); ntfs_index_ctx_put(ictx);
mutex_unlock(&vol->quota_q_ino->i_mutex); mutex_unlock(&vol->quota_q_ino->i_mutex);
return FALSE; return false;
} }
#endif /* NTFS_RW */ #endif /* NTFS_RW */
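A hedged caller sketch (assumed flow, not this patch's code; NVolQuotaOutOfDate() is assumed as the test counterpart of the NVolSetQuotaOutOfDate() seen above): when the volume goes writable, the driver must tell Windows its quota information can no longer be trusted, or stay read-only:

	if (!NVolQuotaOutOfDate(vol) && !ntfs_mark_quotas_out_of_date(vol)) {
		ntfs_error(vol->sb, "Failed to mark quotas out of date. "
				"Mounting read-only.");
		vol->sb->s_flags |= MS_RDONLY;
	}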
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include "types.h" #include "types.h"
#include "volume.h" #include "volume.h"
extern BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol); extern bool ntfs_mark_quotas_out_of_date(ntfs_volume *vol);
#endif /* NTFS_RW */ #endif /* NTFS_RW */
......
...@@ -149,10 +149,10 @@ static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl, ...@@ -149,10 +149,10 @@ static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl,
* *
* It is up to the caller to serialize access to the runlists @dst and @src. * It is up to the caller to serialize access to the runlists @dst and @src.
* *
* Return: TRUE Success, the runlists can be merged. * Return: true Success, the runlists can be merged.
* FALSE Failure, the runlists cannot be merged. * false Failure, the runlists cannot be merged.
*/ */
static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst, static inline bool ntfs_are_rl_mergeable(runlist_element *dst,
runlist_element *src) runlist_element *src)
{ {
BUG_ON(!dst); BUG_ON(!dst);
...@@ -160,19 +160,19 @@ static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst, ...@@ -160,19 +160,19 @@ static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst,
/* We can merge unmapped regions even if they are misaligned. */ /* We can merge unmapped regions even if they are misaligned. */
if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED)) if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED))
return TRUE; return true;
/* If the runs are misaligned, we cannot merge them. */ /* If the runs are misaligned, we cannot merge them. */
if ((dst->vcn + dst->length) != src->vcn) if ((dst->vcn + dst->length) != src->vcn)
return FALSE; return false;
/* If both runs are non-sparse and contiguous, we can merge them. */ /* If both runs are non-sparse and contiguous, we can merge them. */
if ((dst->lcn >= 0) && (src->lcn >= 0) && if ((dst->lcn >= 0) && (src->lcn >= 0) &&
((dst->lcn + dst->length) == src->lcn)) ((dst->lcn + dst->length) == src->lcn))
return TRUE; return true;
/* If we are merging two holes, we can merge them. */ /* If we are merging two holes, we can merge them. */
if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE)) if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE))
return TRUE; return true;
/* Cannot merge. */ /* Cannot merge. */
return FALSE; return false;
} }
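To make the four merge rules above concrete, here is a small stand-alone model with simplified types and toy data (plain C99, not the kernel's code; the real runlist_element lives in fs/ntfs/runlist.h):

	#include <stdbool.h>
	#include <stdio.h>

	typedef long long VCN;
	typedef long long LCN;
	#define LCN_HOLE		(-1)
	#define LCN_RL_NOT_MAPPED	(-2)

	struct rl { VCN vcn; LCN lcn; long long length; };

	/* Same tests, in the same order as ntfs_are_rl_mergeable() above. */
	static bool mergeable(const struct rl *dst, const struct rl *src)
	{
		if (dst->lcn == LCN_RL_NOT_MAPPED &&
				src->lcn == LCN_RL_NOT_MAPPED)
			return true;		/* unmapped: merge anyway */
		if (dst->vcn + dst->length != src->vcn)
			return false;		/* misaligned runs */
		if (dst->lcn >= 0 && src->lcn >= 0 &&
				dst->lcn + dst->length == src->lcn)
			return true;		/* contiguous on disk */
		return dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE;
	}

	int main(void)
	{
		struct rl a = { 0, 100, 8 };	/* VCNs 0-7 -> LCNs 100-107 */
		struct rl b = { 8, 108, 4 };	/* VCNs 8-11 -> LCNs 108-111 */
		struct rl c = { 8, 200, 4 };	/* VCN-contiguous, LCNs jump */

		printf("a+b: %d\n", mergeable(&a, &b));	/* 1: can merge */
		printf("a+c: %d\n", mergeable(&a, &c));	/* 0: cannot */
		return 0;
	}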
/** /**
...@@ -218,7 +218,7 @@ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src) ...@@ -218,7 +218,7 @@ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
static inline runlist_element *ntfs_rl_append(runlist_element *dst, static inline runlist_element *ntfs_rl_append(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc) int dsize, runlist_element *src, int ssize, int loc)
{ {
BOOL right = FALSE; /* Right end of @src needs merging. */ bool right = false; /* Right end of @src needs merging. */
int marker; /* End of the inserted runs. */ int marker; /* End of the inserted runs. */
BUG_ON(!dst); BUG_ON(!dst);
...@@ -285,8 +285,8 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst, ...@@ -285,8 +285,8 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst,
static inline runlist_element *ntfs_rl_insert(runlist_element *dst, static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc) int dsize, runlist_element *src, int ssize, int loc)
{ {
BOOL left = FALSE; /* Left end of @src needs merging. */ bool left = false; /* Left end of @src needs merging. */
BOOL disc = FALSE; /* Discontinuity between @dst and @src. */ bool disc = false; /* Discontinuity between @dst and @src. */
int marker; /* End of the inserted runs. */ int marker; /* End of the inserted runs. */
BUG_ON(!dst); BUG_ON(!dst);
...@@ -382,8 +382,8 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst, ...@@ -382,8 +382,8 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc) int dsize, runlist_element *src, int ssize, int loc)
{ {
signed delta; signed delta;
BOOL left = FALSE; /* Left end of @src needs merging. */ bool left = false; /* Left end of @src needs merging. */
BOOL right = FALSE; /* Right end of @src needs merging. */ bool right = false; /* Right end of @src needs merging. */
int tail; /* Start of tail of @dst. */ int tail; /* Start of tail of @dst. */
int marker; /* End of the inserted runs. */ int marker; /* End of the inserted runs. */
...@@ -620,8 +620,8 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl, ...@@ -620,8 +620,8 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl,
; ;
{ {
BOOL start; bool start;
BOOL finish; bool finish;
int ds = dend + 1; /* Number of elements in drl & srl */ int ds = dend + 1; /* Number of elements in drl & srl */
int ss = sfinal - sstart + 1; int ss = sfinal - sstart + 1;
...@@ -635,7 +635,7 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl, ...@@ -635,7 +635,7 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl,
if (finish && !drl[dins].length) if (finish && !drl[dins].length)
ss++; ss++;
if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn)) if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
finish = FALSE; finish = false;
#if 0 #if 0
ntfs_debug("dfinal = %i, dend = %i", dfinal, dend); ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send); ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
...@@ -1134,7 +1134,7 @@ int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol, ...@@ -1134,7 +1134,7 @@ int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
{ {
LCN prev_lcn; LCN prev_lcn;
int rls; int rls;
BOOL the_end = FALSE; bool the_end = false;
BUG_ON(first_vcn < 0); BUG_ON(first_vcn < 0);
BUG_ON(last_vcn < -1); BUG_ON(last_vcn < -1);
...@@ -1168,7 +1168,7 @@ int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol, ...@@ -1168,7 +1168,7 @@ int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
s64 s1 = last_vcn + 1; s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1)) if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn; length = s1 - rl->vcn;
the_end = TRUE; the_end = true;
} }
delta = first_vcn - rl->vcn; delta = first_vcn - rl->vcn;
/* Header byte + length. */ /* Header byte + length. */
...@@ -1204,7 +1204,7 @@ int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol, ...@@ -1204,7 +1204,7 @@ int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
s64 s1 = last_vcn + 1; s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1)) if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn; length = s1 - rl->vcn;
the_end = TRUE; the_end = true;
} }
/* Header byte + length. */ /* Header byte + length. */
rls += 1 + ntfs_get_nr_significant_bytes(length); rls += 1 + ntfs_get_nr_significant_bytes(length);
...@@ -1327,7 +1327,7 @@ int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst, ...@@ -1327,7 +1327,7 @@ int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
LCN prev_lcn; LCN prev_lcn;
s8 *dst_max, *dst_next; s8 *dst_max, *dst_next;
int err = -ENOSPC; int err = -ENOSPC;
BOOL the_end = FALSE; bool the_end = false;
s8 len_len, lcn_len; s8 len_len, lcn_len;
BUG_ON(first_vcn < 0); BUG_ON(first_vcn < 0);
...@@ -1370,7 +1370,7 @@ int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst, ...@@ -1370,7 +1370,7 @@ int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
s64 s1 = last_vcn + 1; s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1)) if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn; length = s1 - rl->vcn;
the_end = TRUE; the_end = true;
} }
delta = first_vcn - rl->vcn; delta = first_vcn - rl->vcn;
/* Write length. */ /* Write length. */
...@@ -1422,7 +1422,7 @@ int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst, ...@@ -1422,7 +1422,7 @@ int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
s64 s1 = last_vcn + 1; s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1)) if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn; length = s1 - rl->vcn;
the_end = TRUE; the_end = true;
} }
/* Write length. */ /* Write length. */
len_len = ntfs_write_significant_bytes(dst + 1, dst_max, len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
...@@ -1541,7 +1541,7 @@ int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist, ...@@ -1541,7 +1541,7 @@ int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
*/ */
if (rl->length) { if (rl->length) {
runlist_element *trl; runlist_element *trl;
BOOL is_end; bool is_end;
ntfs_debug("Shrinking runlist."); ntfs_debug("Shrinking runlist.");
/* Determine the runlist size. */ /* Determine the runlist size. */
...@@ -1555,11 +1555,11 @@ int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist, ...@@ -1555,11 +1555,11 @@ int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
* If a run was partially truncated, make the following runlist * If a run was partially truncated, make the following runlist
* element a terminator. * element a terminator.
*/ */
is_end = FALSE; is_end = false;
if (rl->length) { if (rl->length) {
rl++; rl++;
if (!rl->length) if (!rl->length)
is_end = TRUE; is_end = true;
rl->vcn = new_length; rl->vcn = new_length;
rl->length = 0; rl->length = 0;
} }
...@@ -1648,7 +1648,7 @@ int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist, ...@@ -1648,7 +1648,7 @@ int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
s64 delta; s64 delta;
runlist_element *rl, *rl_end, *rl_real_end, *trl; runlist_element *rl, *rl_end, *rl_real_end, *trl;
int old_size; int old_size;
BOOL lcn_fixup = FALSE; bool lcn_fixup = false;
ntfs_debug("Entering for start 0x%llx, length 0x%llx.", ntfs_debug("Entering for start 0x%llx, length 0x%llx.",
(long long)start, (long long)length); (long long)start, (long long)length);
...@@ -1862,7 +1862,7 @@ int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist, ...@@ -1862,7 +1862,7 @@ int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
if (rl->lcn >= 0) { if (rl->lcn >= 0) {
rl->lcn -= delta; rl->lcn -= delta;
/* Need this in case the lcn just became negative. */ /* Need this in case the lcn just became negative. */
lcn_fixup = TRUE; lcn_fixup = true;
} }
rl->length += delta; rl->length += delta;
goto split_end; goto split_end;
......
...@@ -61,11 +61,6 @@ typedef sle64 leLSN; ...@@ -61,11 +61,6 @@ typedef sle64 leLSN;
typedef s64 USN; typedef s64 USN;
typedef sle64 leUSN; typedef sle64 leUSN;
typedef enum {
FALSE = 0,
TRUE = 1
} BOOL;
typedef enum { typedef enum {
CASE_SENSITIVE = 0, CASE_SENSITIVE = 0,
IGNORE_CASE = 1, IGNORE_CASE = 1,
......
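The hunk above removes the driver-private BOOL/TRUE/FALSE enum; after the conversion fs/ntfs relies on the kernel-wide bool from <linux/types.h>. In plain C99 terms (a user-space analogue, for illustration only) the change amounts to:

	#include <stdbool.h>	/* user-space analogue of the kernel's bool */
	#include <stdio.h>

	int main(void)
	{
		bool is_retry = false;		/* was: BOOL is_retry = FALSE; */

		if (!is_retry) {
			is_retry = true;	/* was: is_retry = TRUE; */
			printf("is_retry = %d\n", is_retry);
		}
		return 0;
	}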
...@@ -61,16 +61,16 @@ static const u8 legal_ansi_char_array[0x40] = { ...@@ -61,16 +61,16 @@ static const u8 legal_ansi_char_array[0x40] = {
* @upcase: upcase table (only if @ic == IGNORE_CASE) * @upcase: upcase table (only if @ic == IGNORE_CASE)
* @upcase_size: length in Unicode characters of @upcase (if present) * @upcase_size: length in Unicode characters of @upcase (if present)
* *
* Compare the names @s1 and @s2 and return TRUE (1) if the names are * Compare the names @s1 and @s2 and return 'true' (1) if the names are
* identical, or FALSE (0) if they are not identical. If @ic is IGNORE_CASE, * identical, or 'false' (0) if they are not identical. If @ic is IGNORE_CASE,
* the @upcase table is used to perform a case insensitive comparison. * the @upcase table is used to perform a case insensitive comparison.
*/ */
BOOL ntfs_are_names_equal(const ntfschar *s1, size_t s1_len, bool ntfs_are_names_equal(const ntfschar *s1, size_t s1_len,
const ntfschar *s2, size_t s2_len, const IGNORE_CASE_BOOL ic, const ntfschar *s2, size_t s2_len, const IGNORE_CASE_BOOL ic,
const ntfschar *upcase, const u32 upcase_size) const ntfschar *upcase, const u32 upcase_size)
{ {
if (s1_len != s2_len) if (s1_len != s2_len)
return FALSE; return false;
if (ic == CASE_SENSITIVE) if (ic == CASE_SENSITIVE)
return !ntfs_ucsncmp(s1, s2, s1_len); return !ntfs_ucsncmp(s1, s2, s1_len);
return !ntfs_ucsncasecmp(s1, s2, s1_len, upcase, upcase_size); return !ntfs_ucsncasecmp(s1, s2, s1_len, upcase, upcase_size);
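A stand-alone model of the dispatch above (toy types and an ASCII-only upcase table, purely illustrative; NTFS's real upcase table covers all of Unicode and is loaded from the volume):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint16_t ntfschar;
	typedef enum { CASE_SENSITIVE = 0, IGNORE_CASE = 1 } IGNORE_CASE_BOOL;

	static ntfschar upc(ntfschar c)	/* toy upcase: ASCII letters only */
	{
		return (c >= 'a' && c <= 'z') ? c - 0x20 : c;
	}

	static bool names_equal(const ntfschar *s1, size_t len1,
			const ntfschar *s2, size_t len2, IGNORE_CASE_BOOL ic)
	{
		size_t i;

		if (len1 != len2)
			return false;	/* different lengths never match */
		for (i = 0; i < len1; i++) {
			ntfschar a = s1[i], b = s2[i];

			if (ic == IGNORE_CASE) {
				a = upc(a);
				b = upc(b);
			}
			if (a != b)
				return false;
		}
		return true;
	}

	int main(void)
	{
		ntfschar a[] = { 'F', 'i', 'l', 'e' };
		ntfschar b[] = { 'f', 'I', 'L', 'E' };

		/* Prints "0 1": case-sensitive no, case-insensitive yes. */
		printf("%d %d\n", names_equal(a, 4, b, 4, CASE_SENSITIVE),
				names_equal(a, 4, b, 4, IGNORE_CASE));
		return 0;
	}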
......
...@@ -39,12 +39,12 @@ ...@@ -39,12 +39,12 @@
* @vol: ntfs volume on which to stamp the transaction log * @vol: ntfs volume on which to stamp the transaction log
* *
* Stamp the transaction log ($UsnJrnl) on the ntfs volume @vol and return * Stamp the transaction log ($UsnJrnl) on the ntfs volume @vol and return
* TRUE on success and FALSE on error. * 'true' on success and 'false' on error.
* *
* This function assumes that the transaction log has already been loaded and * This function assumes that the transaction log has already been loaded and
* consistency checked by a call to fs/ntfs/super.c::load_and_init_usnjrnl(). * consistency checked by a call to fs/ntfs/super.c::load_and_init_usnjrnl().
*/ */
BOOL ntfs_stamp_usnjrnl(ntfs_volume *vol) bool ntfs_stamp_usnjrnl(ntfs_volume *vol)
{ {
ntfs_debug("Entering."); ntfs_debug("Entering.");
if (likely(!NVolUsnJrnlStamped(vol))) { if (likely(!NVolUsnJrnlStamped(vol))) {
...@@ -56,7 +56,7 @@ BOOL ntfs_stamp_usnjrnl(ntfs_volume *vol) ...@@ -56,7 +56,7 @@ BOOL ntfs_stamp_usnjrnl(ntfs_volume *vol)
if (IS_ERR(page)) { if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read from " ntfs_error(vol->sb, "Failed to read from "
"$UsnJrnl/$DATA/$Max attribute."); "$UsnJrnl/$DATA/$Max attribute.");
return FALSE; return false;
} }
uh = (USN_HEADER*)page_address(page); uh = (USN_HEADER*)page_address(page);
stamp = get_current_ntfs_time(); stamp = get_current_ntfs_time();
...@@ -78,7 +78,7 @@ BOOL ntfs_stamp_usnjrnl(ntfs_volume *vol) ...@@ -78,7 +78,7 @@ BOOL ntfs_stamp_usnjrnl(ntfs_volume *vol)
NVolSetUsnJrnlStamped(vol); NVolSetUsnJrnlStamped(vol);
} }
ntfs_debug("Done."); ntfs_debug("Done.");
return TRUE; return true;
} }
#endif /* NTFS_RW */ #endif /* NTFS_RW */
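A hedged remount sketch (assumed flow, not this patch's code; sb and vol are assumed to be in scope): stamp the journal once the volume becomes writable, falling back to read-only on failure:

	if (!(sb->s_flags & MS_RDONLY) && !ntfs_stamp_usnjrnl(vol)) {
		ntfs_error(sb, "Failed to stamp transaction log "
				"($UsnJrnl). Mounting read-only.");
		sb->s_flags |= MS_RDONLY;
	}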
...@@ -198,7 +198,7 @@ typedef struct { ...@@ -198,7 +198,7 @@ typedef struct {
/* sizeof() = 60 (0x3c) bytes */ /* sizeof() = 60 (0x3c) bytes */
} __attribute__ ((__packed__)) USN_RECORD; } __attribute__ ((__packed__)) USN_RECORD;
extern BOOL ntfs_stamp_usnjrnl(ntfs_volume *vol); extern bool ntfs_stamp_usnjrnl(ntfs_volume *vol);
#endif /* NTFS_RW */ #endif /* NTFS_RW */
......