Commit 96de65a9 authored by Konstantin Komarov

fs/ntfs3: Code formatting and refactoring

Added minor refactoring.
Added and fixed some comments.
In some places, the code has been reformatted to fit into 80 columns.
clang-format-12 was used to format the code according to the kernel's .clang-format.
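
For illustration only (not part of this commit): a minimal sketch of the wrapping style the reformatting applies throughout the diff below. Long conditional expressions keep the '?' and ':' at the end of a line rather than the start of the next one, so statements stay within 80 columns. The helper function here is hypothetical, not taken from the ntfs3 sources.

/* Hypothetical example, not from the ntfs3 sources. */
static unsigned int example_pick_block_size(int use_large,
                                            unsigned int large_size,
                                            unsigned int small_size)
{
        /*
         * Old wrapping placed the operators at line start:
         *      return use_large
         *                     ? large_size
         *                     : small_size;
         * The reformatted style keeps them at line end:
         */
        return use_large ? large_size :
                           small_size;
}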
Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
parent 75c5e0c9
@@ -405,8 +405,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 	int err = 0;
 	struct ntfs_sb_info *sbi = ni->mi.sbi;
 	u8 cluster_bits = sbi->cluster_bits;
-	bool is_mft =
-		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
+	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
+		      !name_len;
 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
 	struct ATTRIB *attr = NULL, *attr_b;
 	struct ATTR_LIST_ENTRY *le, *le_b;
@@ -531,11 +531,10 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 			pre_alloc = 0;
 			if (type == ATTR_DATA && !name_len &&
 			    sbi->options->prealloc) {
-				pre_alloc =
-					bytes_to_cluster(
-						sbi,
-						get_pre_allocated(new_size)) -
-					new_alen;
+				pre_alloc = bytes_to_cluster(
+						    sbi, get_pre_allocated(
+								 new_size)) -
+					    new_alen;
 			}

 			/* Get the last LCN to allocate from. */
@@ -573,8 +572,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		err = attr_allocate_clusters(
 			sbi, run, vcn, lcn, to_allocate, &pre_alloc,
 			is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
-			is_mft ? 0
-			       : (sbi->record_size -
+			is_mft ? 0 :
+				 (sbi->record_size -
 					  le32_to_cpu(rec->used) + 8) /
 						  3 +
 					  1,
...
@@ -40,9 +40,9 @@ static struct kmem_cache *ntfs_enode_cachep;
 int __init ntfs3_init_bitmap(void)
 {
-	ntfs_enode_cachep =
-		kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
-				  SLAB_RECLAIM_ACCOUNT, NULL);
+	ntfs_enode_cachep = kmem_cache_create("ntfs3_enode_cache",
+					      sizeof(struct e_node), 0,
+					      SLAB_RECLAIM_ACCOUNT, NULL);
 	return ntfs_enode_cachep ? 0 : -ENOMEM;
 }
@@ -286,9 +286,9 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
 	if (wnd->uptodated != 1) {
 		/* Check bits before 'bit'. */
 		ib = wnd->zone_bit == wnd->zone_end ||
-			     bit < wnd->zone_end
-		     ? 0
-		     : wnd->zone_end;
+			     bit < wnd->zone_end ?
+			     0 :
+			     wnd->zone_end;
 		while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
 			bit -= 1;
@@ -297,9 +297,9 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
 		/* Check bits after 'end_in'. */
 		ib = wnd->zone_bit == wnd->zone_end ||
-			     end_in > wnd->zone_bit
-		     ? wnd->nbits
-		     : wnd->zone_bit;
+			     end_in > wnd->zone_bit ?
+			     wnd->nbits :
+			     wnd->zone_bit;
 		while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
 			end_in += 1;
@@ -417,8 +417,8 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
 			return;
 		n3 = rb_first(&wnd->count_tree);
 		wnd->extent_max =
-			n3 ? rb_entry(n3, struct e_node, count.node)->count.key
-			   : 0;
+			n3 ? rb_entry(n3, struct e_node, count.node)->count.key :
+			     0;
 		return;
 	}
...
@@ -22,20 +22,21 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
 {
 	struct fstrim_range __user *user_range;
 	struct fstrim_range range;
+	struct block_device *dev;
 	int err;

 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;

-	if (!bdev_max_discard_sectors(sbi->sb->s_bdev))
+	dev = sbi->sb->s_bdev;
+	if (!bdev_max_discard_sectors(dev))
 		return -EOPNOTSUPP;

 	user_range = (struct fstrim_range __user *)arg;
 	if (copy_from_user(&range, user_range, sizeof(range)))
 		return -EFAULT;

-	range.minlen = max_t(u32, range.minlen,
-			     bdev_discard_granularity(sbi->sb->s_bdev));
+	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

 	err = ntfs_trim_fs(sbi, &range);
 	if (err < 0)
@@ -190,8 +191,8 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
 	for (; idx < idx_end; idx += 1, from = 0) {
 		page_off = (loff_t)idx << PAGE_SHIFT;
-		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
-						     : PAGE_SIZE;
+		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
+						       PAGE_SIZE;
 		iblock = page_off >> inode->i_blkbits;

 		page = find_or_create_page(mapping, idx,
@@ -564,13 +565,14 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 		ni_unlock(ni);
 	} else {
 		/* Check new size. */
+		u8 cluster_bits = sbi->cluster_bits;

 		/* generic/213: expected -ENOSPC instead of -EFBIG. */
 		if (!is_supported_holes) {
 			loff_t to_alloc = new_size - inode_get_bytes(inode);

 			if (to_alloc > 0 &&
-			    (to_alloc >> sbi->cluster_bits) >
+			    (to_alloc >> cluster_bits) >
 				    wnd_zeroes(&sbi->used.bitmap)) {
 				err = -ENOSPC;
 				goto out;
@@ -591,7 +593,7 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 		}

 		if (is_supported_holes) {
-			CLST vcn = vbo >> sbi->cluster_bits;
+			CLST vcn = vbo >> cluster_bits;
 			CLST cend = bytes_to_cluster(sbi, end);
 			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
 			CLST lcn, clen;
@@ -1049,8 +1051,8 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (ret)
 		goto out;

-	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
-				: __generic_file_write_iter(iocb, from);
+	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
+				  __generic_file_write_iter(iocb, from);

 out:
 	inode_unlock(inode);
@@ -1102,8 +1104,9 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
 	int err = 0;

 	/* If we are last writer on the inode, drop the block reservation. */
-	if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
-				      atomic_read(&inode->i_writecount) == 1)) {
+	if (sbi->options->prealloc &&
+	    ((file->f_mode & FMODE_WRITE) &&
+	     atomic_read(&inode->i_writecount) == 1)) {
 		ni_lock(ni);
 		down_write(&ni->file.run_lock);
...
@@ -76,8 +76,8 @@ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
 	const struct ATTRIB *attr;

 	attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
-	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO))
-		    : NULL;
+	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) :
+		      NULL;
 }

 /*
@@ -91,8 +91,8 @@ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
 	attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
-	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5))
-		    : NULL;
+	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) :
+		      NULL;
 }

 /*
@@ -1439,8 +1439,8 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
 	int err;
 	CLST plen;
 	struct ATTRIB *attr;
-	bool is_ext =
-		(flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
+	bool is_ext = (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) &&
+		      !svcn;
 	u32 name_size = ALIGN(name_len * sizeof(short), 8);
 	u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
 	u32 run_off = name_off + name_size;
@@ -1756,9 +1756,9 @@ int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
 	}

 	/* Resize nonresident empty attribute in-place only. */
-	new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED))
-			    ? (SIZEOF_NONRESIDENT_EX + 8)
-			    : (SIZEOF_NONRESIDENT + 8);
+	new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
+			    (SIZEOF_NONRESIDENT_EX + 8) :
+			    (SIZEOF_NONRESIDENT + 8);

 	if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
 		return -EOPNOTSUPP;
@@ -2965,14 +2965,14 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
 {
 	struct ntfs_sb_info *sbi = ni->mi.sbi;
 	struct ATTRIB *attr;
-	u16 de_key_size = de2 ? le16_to_cpu(de2->key_size) : 0;
+	u16 de_key_size;

 	switch (undo_step) {
 	case 4:
+		de_key_size = le16_to_cpu(de2->key_size);
 		if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0,
-				       &attr, NULL, NULL)) {
+				       &attr, NULL, NULL))
 			return false;
-		}

 		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de2 + 1, de_key_size);
 		mi_get_ref(&ni->mi, &de2->ref);
@@ -2981,19 +2981,16 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
 		de2->flags = 0;
 		de2->res = 0;

-		if (indx_insert_entry(&dir_ni->dir, dir_ni, de2, sbi, NULL,
-				      1)) {
+		if (indx_insert_entry(&dir_ni->dir, dir_ni, de2, sbi, NULL, 1))
 			return false;
-		}

 		fallthrough;

 	case 2:
 		de_key_size = le16_to_cpu(de->key_size);
 		if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0,
-				       &attr, NULL, NULL)) {
+				       &attr, NULL, NULL))
 			return false;
-		}

 		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de + 1, de_key_size);
 		mi_get_ref(&ni->mi, &de->ref);
@@ -3162,9 +3159,9 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
 			u64 data_size = le64_to_cpu(attr->nres.data_size);
 			__le64 valid_le;

-			dup->alloc_size = is_attr_ext(attr)
-						  ? attr->nres.total_size
-						  : attr->nres.alloc_size;
+			dup->alloc_size = is_attr_ext(attr) ?
+						  attr->nres.total_size :
+						  attr->nres.alloc_size;
 			dup->data_size = attr->nres.data_size;

 			if (new_valid > data_size)
...
@@ -827,10 +827,10 @@ static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
 	memcpy(rt + 1, tbl + 1, esize * used);

-	rt->free_goal = free_goal == ~0u
-				? cpu_to_le32(~0u)
-				: cpu_to_le32(sizeof(struct RESTART_TABLE) +
-					      free_goal * esize);
+	rt->free_goal = free_goal == ~0u ?
+				cpu_to_le32(~0u) :
+				cpu_to_le32(sizeof(struct RESTART_TABLE) +
+					    free_goal * esize);

 	if (tbl->first_free) {
 		rt->first_free = tbl->first_free;
@@ -1089,9 +1089,9 @@ static inline u64 base_lsn(struct ntfs_log *log,
 		 (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
 			<< log->file_data_bits) +
 	       ((((is_log_record_end(hdr) &&
-		   h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
-			  ? le16_to_cpu(hdr->record_hdr.next_record_off)
-			  : log->page_size) +
+		   h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn)) ?
+			 le16_to_cpu(hdr->record_hdr.next_record_off) :
+			 log->page_size) +
 		 lsn) >>
 		3);
@@ -1298,9 +1298,9 @@ static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
 	if (!log->clst_per_page)
 		log->clst_per_page = 1;

-	log->first_page = major_ver >= 2
-				  ? 0x22 * page_size
-				  : ((sys_page_size << 1) + (page_size << 1));
+	log->first_page = major_ver >= 2 ?
+				  0x22 * page_size :
+				  ((sys_page_size << 1) + (page_size << 1));
 	log->major_ver = major_ver;
 	log->minor_ver = minor_ver;
 }
@@ -1512,20 +1512,19 @@ static u32 current_log_avail(struct ntfs_log *log)
 	 * have to compute the free range.
 	 * If there is no oldest lsn then start at the first page of the file.
 	 */
-	oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
-			     ? log->first_page
-			     : (log->oldest_lsn_off & ~log->sys_page_mask);
+	oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN) ?
+			     log->first_page :
+			     (log->oldest_lsn_off & ~log->sys_page_mask);

 	/*
 	 * We will use the next log page offset to compute the next free page.
 	 * If we are going to reuse this page go to the next page.
 	 * If we are at the first page then use the end of the file.
 	 */
-	next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
-				? log->next_page + log->page_size
-				: log->next_page == log->first_page
-					  ? log->l_size
-					  : log->next_page;
+	next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL) ?
+				log->next_page + log->page_size :
+				log->next_page == log->first_page ? log->l_size :
+								    log->next_page;

 	/* If the two offsets are the same then there is no available space. */
 	if (oldest_off == next_free_off)
@@ -1535,9 +1534,9 @@ static u32 current_log_avail(struct ntfs_log *log)
 	 * this range from the total available pages.
 	 */
 	free_bytes =
-		oldest_off < next_free_off
-			? log->total_avail_pages - (next_free_off - oldest_off)
-			: oldest_off - next_free_off;
+		oldest_off < next_free_off ?
+			log->total_avail_pages - (next_free_off - oldest_off) :
+			oldest_off - next_free_off;

 	free_bytes >>= log->page_bits;
 	return free_bytes * log->reserved;
@@ -1671,8 +1670,8 @@ static int last_log_lsn(struct ntfs_log *log)
 	}

 	best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
-	best_lsn2 =
-		second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
+	best_lsn2 = second_tail ? base_lsn(log, second_tail, second_file_off) :
+				  0;

 	if (first_tail && second_tail) {
 		if (best_lsn1 > best_lsn2) {
@@ -1767,8 +1766,8 @@ static int last_log_lsn(struct ntfs_log *log)
 	page_cnt = page_pos = 1;

-	curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
-					       : log->next_page;
+	curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off) :
+						 log->next_page;

 	wrapped_file =
 		curpage_off == log->first_page &&
@@ -1826,9 +1825,9 @@ static int last_log_lsn(struct ntfs_log *log)
 			    le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
 		    ((lsn_cur >> log->file_data_bits) +
 		     ((curpage_off <
-		       (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
-			      ? 1
-			      : 0)) != expected_seq) {
+		       (lsn_to_vbo(log, lsn_cur) & ~log->page_mask)) ?
+			      1 :
+			      0)) != expected_seq) {
 			goto check_tail;
 		}
@@ -2642,9 +2641,10 @@ static inline bool check_index_root(const struct ATTRIB *attr,
 {
 	bool ret;
 	const struct INDEX_ROOT *root = resident_data(attr);
-	u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
-				? sbi->cluster_bits
-				: SECTOR_SHIFT;
+	u8 index_bits = le32_to_cpu(root->index_block_size) >=
+					sbi->cluster_size ?
+				sbi->cluster_bits :
+				SECTOR_SHIFT;
 	u8 block_clst = root->index_block_clst;

 	if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
@@ -3683,7 +3683,8 @@ static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
 	if (a_dirty) {
 		attr = oa->attr;
-		err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0);
+		err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes,
+					0);
 		if (err)
 			goto out;
 	}
@@ -3768,11 +3769,10 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
 	if (!log)
 		return -ENOMEM;

-	memset(&rst_info, 0, sizeof(struct restart_info));
 	log->ni = ni;
 	log->l_size = l_size;
 	log->one_page_buf = kmalloc(page_size, GFP_NOFS);

 	if (!log->one_page_buf) {
 		err = -ENOMEM;
 		goto out;
@@ -3783,6 +3783,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
 	log->page_bits = blksize_bits(page_size);

 	/* Look for a restart area on the disk. */
+	memset(&rst_info, 0, sizeof(struct restart_info));
 	err = log_read_rst(log, l_size, true, &rst_info);
 	if (err)
 		goto out;
@@ -3859,10 +3860,10 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
 	log->init_ra = !!rst_info.vbo;

 	/* If we have a valid page then grab a pointer to the restart area. */
-	ra2 = rst_info.valid_page
-		      ? Add2Ptr(rst_info.r_page,
-				le16_to_cpu(rst_info.r_page->ra_off))
-		      : NULL;
+	ra2 = rst_info.valid_page ?
+		      Add2Ptr(rst_info.r_page,
+			      le16_to_cpu(rst_info.r_page->ra_off)) :
+		      NULL;

 	if (rst_info.chkdsk_was_run ||
 	    (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
...
@@ -172,8 +172,8 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
 	u16 sample, fo, fn;

 	fo = le16_to_cpu(rhdr->fix_off);
-	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
-		    : le16_to_cpu(rhdr->fix_num);
+	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
+		      le16_to_cpu(rhdr->fix_num);

 	/* Check errors. */
 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
@@ -849,14 +849,13 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
 	u32 blocksize, bytes;
 	sector_t block1, block2;

-	if (!sb)
+	/*
+	 * sb can be NULL here. In this case sbi->flags should be 0 too.
+	 */
+	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
 		return;

 	blocksize = sb->s_blocksize;
-
-	if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
-		return;
-
 	bytes = sbi->mft.recs_mirr << sbi->record_bits;
 	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
 	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
...
@@ -431,8 +431,9 @@ static int scan_nres_bitmap(struct ntfs_inode *ni, struct ATTRIB *bitmap,
 		if (vbo + blocksize > data_size)
 			nbits = 8 * (data_size - vbo);

-		ok = nbits > from ? (*fn)((ulong *)bh->b_data, from, nbits, ret)
-				  : false;
+		ok = nbits > from ?
+			     (*fn)((ulong *)bh->b_data, from, nbits, ret) :
+			     false;
 		put_bh(bh);

 		if (ok) {
@@ -764,8 +765,7 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
 				return NULL;

 			max_idx = 0;
-			table_size = min(table_size * 2,
-					 (int)ARRAY_SIZE(offs));
+			table_size = min(table_size * 2, (int)ARRAY_SIZE(offs));
 			goto fill_table;
 		}
 	} else if (diff2 < 0) {
@@ -1170,8 +1170,10 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
 		/* Read next level. */
 		err = indx_read(indx, ni, de_get_vbn(e), &node);
-		if (err)
+		if (err) {
+			/* io error? */
 			return err;
+		}

 		/* Lookup entry that is <= to the search value. */
 		e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
@@ -1673,9 +1675,9 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
 	mi->dirty = true;

 	/* Create alloc and bitmap attributes (if not). */
-	err = run_is_empty(&indx->alloc_run)
-		      ? indx_create_allocate(indx, ni, &new_vbn)
-		      : indx_add_allocate(indx, ni, &new_vbn);
+	err = run_is_empty(&indx->alloc_run) ?
+		      indx_create_allocate(indx, ni, &new_vbn) :
+		      indx_add_allocate(indx, ni, &new_vbn);

 	/* Layout of record may be changed, so rescan root. */
 	root = indx_get_root(indx, ni, &attr, &mi);
@@ -1865,9 +1867,9 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
 	hdr_insert_de(indx,
 		      (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size),
 				   up_e + 1, le16_to_cpu(up_e->key_size),
-				   ctx) < 0
-			      ? hdr2
-			      : hdr1,
+				   ctx) < 0 ?
+			      hdr2 :
+			      hdr1,
 		      new_de, NULL, ctx);

 	indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
@@ -2337,8 +2339,8 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
 			err = level ? indx_insert_into_buffer(indx, ni, root,
 							      re, ctx,
 							      fnd->level - 1,
-							      fnd)
-				    : indx_insert_into_root(indx, ni, re, e,
+							      fnd) :
+				      indx_insert_into_root(indx, ni, re, e,
 							    ctx, fnd, 0);
 			kfree(re);
...
@@ -262,8 +262,8 @@ static struct inode *ntfs_read_mft(struct inode *inode,
 		if (!attr->nres.alloc_size)
 			goto next_attr;

-		run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
-					    : &ni->file.run;
+		run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run :
+					      &ni->file.run;
 		break;

 	case ATTR_ROOT:
@@ -290,9 +290,9 @@ static struct inode *ntfs_read_mft(struct inode *inode,
 		if (err)
 			goto out;

-		mode = sb->s_root
-			       ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
-			       : (S_IFDIR | 0777);
+		mode = sb->s_root ?
+			       (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
+			       (S_IFDIR | 0777);
 		goto next_attr;

 	case ATTR_ALLOC:
@@ -449,8 +449,8 @@ static struct inode *ntfs_read_mft(struct inode *inode,
 		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
 		inode->i_op = &ntfs_file_inode_operations;
 		inode->i_fop = &ntfs_file_operations;
-		inode->i_mapping->a_ops =
-			is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
+		inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
+							      &ntfs_aops;
 		if (ino != MFT_REC_MFT)
 			init_rwsem(&ni->file.run_lock);
 	} else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
@@ -786,8 +786,8 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	}

 	ret = blockdev_direct_IO(iocb, inode, iter,
-				 wr ? ntfs_get_block_direct_IO_W
-				    : ntfs_get_block_direct_IO_R);
+				 wr ? ntfs_get_block_direct_IO_W :
+				      ntfs_get_block_direct_IO_R);

 	if (ret > 0)
 		end = vbo + ret;
@@ -846,7 +846,7 @@ int ntfs_set_size(struct inode *inode, u64 new_size)
 }

 static int ntfs_resident_writepage(struct folio *folio,
-				   struct writeback_control *wbc, void *data)
+				   struct writeback_control *wbc, void *data)
 {
 	struct address_space *mapping = data;
 	struct ntfs_inode *ni = ntfs_i(mapping->host);
@@ -887,8 +887,8 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
 	*pagep = NULL;

 	if (is_resident(ni)) {
-		struct page *page = grab_cache_page_write_begin(
-			mapping, pos >> PAGE_SHIFT);
+		struct page *page =
+			grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT);

 		if (!page) {
 			err = -ENOMEM;
@@ -920,9 +920,8 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
 /*
  * ntfs_write_end - Address_space_operations::write_end.
  */
-int ntfs_write_end(struct file *file, struct address_space *mapping,
-		   loff_t pos, u32 len, u32 copied, struct page *page,
-		   void *fsdata)
+int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
+		   u32 len, u32 copied, struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
 	struct ntfs_inode *ni = ntfs_i(inode);
@@ -1605,8 +1604,8 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
 	} else if (S_ISREG(mode)) {
 		inode->i_op = &ntfs_file_inode_operations;
 		inode->i_fop = &ntfs_file_operations;
-		inode->i_mapping->a_ops =
-			is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
+		inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
+							      &ntfs_aops;
 		init_rwsem(&ni->file.run_lock);
 	} else {
 		inode->i_op = &ntfs_special_inode_operations;
...
@@ -296,8 +296,8 @@ static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
  */
 struct lznt *get_lznt_ctx(int level)
 {
-	struct lznt *r = kzalloc(level ? offsetof(struct lznt, hash)
-				       : sizeof(struct lznt),
+	struct lznt *r = kzalloc(level ? offsetof(struct lznt, hash) :
+					 sizeof(struct lznt),
				 GFP_NOFS);

 	if (r)
@@ -392,9 +392,9 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
 			unc_use = err;
 		} else {
 			/* This chunk does not contain compressed data. */
-			unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end
-					  ? unc_end - unc_chunk
-					  : LZNT_CHUNK_SIZE;
+			unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end ?
+					  unc_end - unc_chunk :
+					  LZNT_CHUNK_SIZE;

 			if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
 			    cmpr_end) {
...
@@ -433,8 +433,8 @@ static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
 	inode = ntfs_create_inode(&nop_mnt_idmap, dir, dentry, uni, mode, 0,
 				  NULL, 0, fnd);
-	err = IS_ERR(inode) ? PTR_ERR(inode)
-			    : finish_open(file, dentry, ntfs_file_open);
+	err = IS_ERR(inode) ? PTR_ERR(inode) :
+			      finish_open(file, dentry, ntfs_file_open);
 	dput(d);

 out2:
...
@@ -338,7 +338,7 @@ enum ntfs_inode_mutex_lock_class {
 };

 /*
- * sturct ntfs_inode
+ * struct ntfs_inode
  *
  * Ntfs inode - extends linux inode. consists of one or more MFT inodes.
  */
@@ -699,9 +699,8 @@ int ntfs_get_block(struct inode *inode, sector_t vbn,
 		   struct buffer_head *bh_result, int create);
 int ntfs_write_begin(struct file *file, struct address_space *mapping,
 		     loff_t pos, u32 len, struct page **pagep, void **fsdata);
-int ntfs_write_end(struct file *file, struct address_space *mapping,
-		   loff_t pos, u32 len, u32 copied, struct page *page,
-		   void *fsdata);
+int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
+		   u32 len, u32 copied, struct page *page, void *fsdata);
 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
 int ntfs_sync_inode(struct inode *inode);
 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
@@ -858,7 +857,7 @@ unsigned long ntfs_names_hash(const u16 *name, size_t len, const u16 *upcase,
 /* globals from xattr.c */
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
 struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap,
-			       struct dentry *dentry, int type);
+			       struct dentry *dentry, int type);
 int ntfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
 		 struct posix_acl *acl, int type);
 int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode,
...
@@ -419,10 +419,9 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
 	struct ntfs_sb_info *sbi = mi->sbi;
 	u32 used = le32_to_cpu(rec->used);
 	const u16 *upcase = sbi->upcase;
-	int diff;

 	/* Can we insert mi attribute? */
-	if (used + asize > mi->sbi->record_size)
+	if (used + asize > sbi->record_size)
 		return NULL;

 	/*
@@ -431,7 +430,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
 	 */
 	attr = NULL;
 	while ((attr = mi_enum_attr(mi, attr))) {
-		diff = compare_attr(attr, type, name, name_len, upcase);
+		int diff = compare_attr(attr, type, name, name_len, upcase);

 		if (diff < 0)
 			continue;
@@ -442,9 +441,11 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
 	}

 	if (!attr) {
-		tail = 8; /* Not used, just to suppress warning. */
+		/* Append. */
+		tail = 8;
 		attr = Add2Ptr(rec, used - 8);
 	} else {
+		/* Insert before 'attr'. */
 		tail = used - PtrOffset(rec, attr);
 	}
...
@@ -433,9 +433,9 @@ bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
 		should_add_tail = Tovcn < r->len;

 		if (should_add_tail) {
-			tail_lcn = r->lcn == SPARSE_LCN
-					   ? SPARSE_LCN
-					   : (r->lcn + Tovcn);
+			tail_lcn = r->lcn == SPARSE_LCN ?
+					   SPARSE_LCN :
+					   (r->lcn + Tovcn);
 			tail_vcn = r->vcn + Tovcn;
 			tail_len = r->len - Tovcn;
 		}
...
@@ -39,10 +39,10 @@
  * To mount large volumes as ntfs one should use large cluster size (up to 2M)
  * The maximum volume size in this case is 2^32 * 2^21 = 2^53 = 8P
  *
- * ntfs limits, cluster size is 2M (2^31)
+ * ntfs limits, cluster size is 2M (2^21)
  * -----------------------------------------------------------------------------
- * | < 8P, 2^54 | < 2^32 | yes | yes | yes | yes | yes |
- * | > 8P, 2^54 | > 2^32 | no  | no  | yes | yes | yes |
+ * | < 8P, 2^53 | < 2^32 | yes | yes | yes | yes | yes |
+ * | > 8P, 2^53 | > 2^32 | no  | no  | yes | yes | yes |
 * ----------------------------------------------------------|------------------
 *
 */
@@ -115,9 +115,9 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
 		return;

 	/* Use static allocated buffer, if possible. */
-	name = atomic_dec_and_test(&s_name_buf_cnt)
-		       ? s_name_buf
-		       : kmalloc(sizeof(s_name_buf), GFP_NOFS);
+	name = atomic_dec_and_test(&s_name_buf_cnt) ?
+		       s_name_buf :
+		       kmalloc(sizeof(s_name_buf), GFP_NOFS);

 	if (name) {
 		struct dentry *de = d_find_alias(inode);
@@ -369,7 +369,8 @@ static int ntfs_fs_parse_param(struct fs_context *fc,
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
 			fc->sb_flags |= SB_POSIXACL;
 #else
-			return invalf(fc, "ntfs3: Support for ACL not compiled in!");
+			return invalf(
+				fc, "ntfs3: Support for ACL not compiled in!");
 #endif
 		else
 			fc->sb_flags &= ~SB_POSIXACL;
@@ -404,24 +405,29 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
 	ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
 	if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
-		errorf(fc, "ntfs3: Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
+		errorf(fc,
+		       "ntfs3: Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
 		return -EINVAL;
 	}

 	new_opts->nls = ntfs_load_nls(new_opts->nls_name);
 	if (IS_ERR(new_opts->nls)) {
 		new_opts->nls = NULL;
-		errorf(fc, "ntfs3: Cannot load iocharset %s", new_opts->nls_name);
+		errorf(fc, "ntfs3: Cannot load iocharset %s",
+		       new_opts->nls_name);
 		return -EINVAL;
 	}
 	if (new_opts->nls != sbi->options->nls)
-		return invalf(fc, "ntfs3: Cannot use different iocharset when remounting!");
+		return invalf(
+			fc,
+			"ntfs3: Cannot use different iocharset when remounting!");

 	sync_filesystem(sb);

 	if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
 	    !new_opts->force) {
-		errorf(fc, "ntfs3: Volume is dirty and \"force\" flag is not set!");
+		errorf(fc,
+		       "ntfs3: Volume is dirty and \"force\" flag is not set!");
 		return -EINVAL;
 	}
@@ -539,10 +545,8 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
 	struct ntfs_mount_options *opts = sbi->options;
 	struct user_namespace *user_ns = seq_user_ns(m);

-	seq_printf(m, ",uid=%u",
-		   from_kuid_munged(user_ns, opts->fs_uid));
-	seq_printf(m, ",gid=%u",
-		   from_kgid_munged(user_ns, opts->fs_gid));
+	seq_printf(m, ",uid=%u", from_kuid_munged(user_ns, opts->fs_uid));
+	seq_printf(m, ",gid=%u", from_kgid_munged(user_ns, opts->fs_gid));
 	if (opts->fmask)
 		seq_printf(m, ",fmask=%04o", opts->fs_fmask_inv ^ 0xffff);
 	if (opts->dmask)
@@ -699,7 +703,7 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
 	if (boot->sectors_per_clusters <= 0x80)
 		return boot->sectors_per_clusters;
 	if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
-		return 1U << -(s8)boot->sectors_per_clusters;
+		return 1U << (-(s8)boot->sectors_per_clusters);
 	return -EINVAL;
 }
@@ -717,6 +721,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 	struct buffer_head *bh;
 	struct MFT_REC *rec;
 	u16 fn, ao;
+	u8 cluster_bits;

 	sbi->volume.blocks = dev_size >> PAGE_SHIFT;
@@ -784,7 +789,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 	if (boot_sector_size != sector_size) {
 		ntfs_warn(
 			sb,
-			"Different NTFS sector size (%u) and media sector size (%u)",
+			"Different NTFS sector size (%u) and media sector size (%u).",
 			boot_sector_size, sector_size);
 		dev_size += sector_size - 1;
 	}
@@ -792,8 +797,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 	sbi->cluster_size = boot_sector_size * sct_per_clst;
 	sbi->cluster_bits = blksize_bits(sbi->cluster_size);
-	sbi->mft.lbo = mlcn << sbi->cluster_bits;
-	sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
+	sbi->mft.lbo = mlcn << cluster_bits;
+	sbi->mft.lbo2 = mlcn2 << cluster_bits;

 	/* Compare boot's cluster and sector. */
 	if (sbi->cluster_size < boot_sector_size)
@@ -804,7 +809,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 		/* No way to use ntfs_get_block in this case. */
 		ntfs_err(
 			sb,
-			"Failed to mount 'cause NTFS's cluster size (%u) is less than media sector size (%u)",
+			"Failed to mount 'cause NTFS's cluster size (%u) is less than media sector size (%u).",
 			sbi->cluster_size, sector_size);
 		goto out;
 	}
@@ -840,18 +845,18 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 		gb0 = format_size_gb(dev_size, &mb0);
 		ntfs_warn(
 			sb,
-			"RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only",
+			"RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only.",
 			gb, mb, gb0, mb0);
 		sb->s_flags |= SB_RDONLY;
 	}

-	clusters = sbi->volume.size >> sbi->cluster_bits;
+	clusters = sbi->volume.size >> cluster_bits;
 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
 	/* 32 bits per cluster. */
 	if (clusters >> 32) {
 		ntfs_notice(
 			sb,
-			"NTFS %u.%02u Gb is too big to use 32 bits per cluster",
+			"NTFS %u.%02u Gb is too big to use 32 bits per cluster.",
 			gb, mb);
 		goto out;
 	}
@@ -885,17 +890,17 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 	sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits;

 	/* Maximum size for normal files. */
-	sbi->maxbytes = (clusters << sbi->cluster_bits) - 1;
+	sbi->maxbytes = (clusters << cluster_bits) - 1;
 #ifdef CONFIG_NTFS3_64BIT_CLUSTER
-	if (clusters >= (1ull << (64 - sbi->cluster_bits)))
+	if (clusters >= (1ull << (64 - cluster_bits)))
 		sbi->maxbytes = -1;
 	sbi->maxbytes_sparse = -1;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 #else
 	/* Maximum size for sparse file. */
-	sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
-	sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
+	sbi->maxbytes_sparse = (1ull << (cluster_bits + 32)) - 1;
+	sb->s_maxbytes = 0xFFFFFFFFull << cluster_bits;
 #endif

 	/*
@@ -903,7 +908,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 	 * It would be nice if we are able to allocate 1/8 of
 	 * total clusters for MFT but not more then 512 MB.
 	 */
-	sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3);
+	sbi->zone_max = min_t(CLST, 0x20000000 >> cluster_bits, clusters >> 3);

 	err = 0;
@@ -1433,7 +1438,7 @@ static const struct fs_context_operations ntfs_context_ops = {
 };

 /*
- * ntfs_init_fs_context - Initialize spi and opts
+ * ntfs_init_fs_context - Initialize sbi and opts
 *
 * This will called when mount/remount. We will first initialize
 * options so that if remount we can use just that.
@@ -1506,7 +1511,8 @@ static int __init init_ntfs_fs(void)
 	if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL))
 		pr_info("ntfs3: Enabled Linux POSIX ACLs support\n");
 	if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER))
-		pr_notice("ntfs3: Warning: Activated 64 bits per cluster. Windows does not support this\n");
+		pr_notice(
+			"ntfs3: Warning: Activated 64 bits per cluster. Windows does not support this\n");
 	if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
 		pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
@@ -1549,7 +1555,9 @@ MODULE_DESCRIPTION("ntfs3 read/write filesystem");
 MODULE_INFO(behaviour, "Enabled Linux POSIX ACLs support");
 #endif
 #ifdef CONFIG_NTFS3_64BIT_CLUSTER
-MODULE_INFO(cluster, "Warning: Activated 64 bits per cluster. Windows does not support this");
+MODULE_INFO(
+	cluster,
+	"Warning: Activated 64 bits per cluster. Windows does not support this");
 #endif
 #ifdef CONFIG_NTFS3_LZX_XPRESS
 MODULE_INFO(compression, "Read-only lzx/xpress compression included");
...
@@ -23,8 +23,8 @@
 static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
 {
-	return ea->size ? le32_to_cpu(ea->size)
-			: ALIGN(struct_size(ea, name,
+	return ea->size ? le32_to_cpu(ea->size) :
+			  ALIGN(struct_size(ea, name,
 					    1 + ea->name_len +
 						    le16_to_cpu(ea->elength)),
				4);
...