Commit 088d812f authored by Linus Torvalds

Merge tag 'for-linus-v3.10-rc3' of git://oss.sgi.com/xfs/xfs

Pull xfs fixes from Ben Myers:
 "Here are fixes for corruption on 512 byte filesystems, a rounding
  error, a use-after-free, some flags to fix lockdep reports, and
  several fixes related to CRCs.  We have a somewhat larger post-rc1
  queue than usual due to fixes related to the CRC feature we merged for
  3.10:

   - Fix for corruption with FSX on 512 byte blocksize filesystems
   - Fix rounding error in xfs_free_file_space
   - Fix use-after-free with extent free intents
   - Add several missing KM_NOFS flags to fix lockdep reports
   - Several fixes for CRC related code"

* tag 'for-linus-v3.10-rc3' of git://oss.sgi.com/xfs/xfs:
  xfs: remote attribute lookups require the value length
  xfs: xfs_attr_shortform_allfit() does not handle attr3 format.
  xfs: xfs_da3_node_read_verify() doesn't handle XFS_ATTR3_LEAF_MAGIC
  xfs: fix missing KM_NOFS tags to keep lockdep happy
  xfs: Don't reference the EFI after it is freed
  xfs: fix rounding in xfs_free_file_space
  xfs: fix sub-page blocksize data integrity writes
parents 72de4c63 7ae07780
@@ -725,6 +725,25 @@ xfs_convert_page(
 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
 			i_size_read(inode));
 
+	/*
+	 * If the current map does not span the entire page we are about to try
+	 * to write, then give up. The only way we can write a page that spans
+	 * multiple mappings in a single writeback iteration is via the
+	 * xfs_vm_writepage() function. Data integrity writeback requires the
+	 * entire page to be written in a single attempt, otherwise the part of
+	 * the page we don't write here doesn't get written as part of the data
+	 * integrity sync.
+	 *
+	 * For normal writeback, we also don't attempt to write partial pages
+	 * here as it simply means that write_cache_pages() will see it under
+	 * writeback and ignore the page until some point in the future, at
+	 * which time this will be the only page in the file that needs
+	 * writeback. Hence for more optimal IO patterns, we should always
+	 * avoid partial page writeback due to multiple mappings on a page here.
+	 */
+	if (!xfs_imap_valid(inode, imap, end_offset))
+		goto fail_unlock_page;
+
 	len = 1 << inode->i_blkbits;
 	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
 					PAGE_CACHE_SIZE);
...
@@ -934,17 +934,19 @@ xfs_attr_shortform_allfit(
 	struct xfs_buf		*bp,
 	struct xfs_inode	*dp)
 {
-	xfs_attr_leafblock_t	*leaf;
-	xfs_attr_leaf_entry_t	*entry;
+	struct xfs_attr_leafblock *leaf;
+	struct xfs_attr_leaf_entry *entry;
 	xfs_attr_leaf_name_local_t *name_loc;
-	int			bytes, i;
+	struct xfs_attr3_icleaf_hdr leafhdr;
+	int			bytes;
+	int			i;
 
 	leaf = bp->b_addr;
-	ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+	xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+	entry = xfs_attr3_leaf_entryp(leaf);
 
-	entry = &leaf->entries[0];
 	bytes = sizeof(struct xfs_attr_sf_hdr);
-	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+	for (i = 0; i < leafhdr.count; entry++, i++) {
 		if (entry->flags & XFS_ATTR_INCOMPLETE)
 			continue;		/* don't copy partial entries */
 		if (!(entry->flags & XFS_ATTR_LOCAL))
@@ -954,15 +956,15 @@ xfs_attr_shortform_allfit(
 			return(0);
 		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
 			return(0);
-		bytes += sizeof(struct xfs_attr_sf_entry)-1
+		bytes += sizeof(struct xfs_attr_sf_entry) - 1
 				+ name_loc->namelen
 				+ be16_to_cpu(name_loc->valuelen);
 	}
 	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
 	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
 	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
-		return(-1);
-	return(xfs_attr_shortform_bytesfit(dp, bytes));
+		return -1;
+	return xfs_attr_shortform_bytesfit(dp, bytes);
 }
 
 /*
@@ -2330,9 +2332,10 @@ xfs_attr3_leaf_lookup_int(
 			if (!xfs_attr_namesp_match(args->flags, entry->flags))
 				continue;
 			args->index = probe;
+			args->valuelen = be32_to_cpu(name_rmt->valuelen);
 			args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
 			args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
-					       be32_to_cpu(name_rmt->valuelen));
+						       args->valuelen);
 			return XFS_ERROR(EEXIST);
 		}
 	}
...
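The valuelen fix above matters for the usual two-step xattr pattern, where the
first call only asks the filesystem for the size of the value; a remote
(out-of-leaf) attribute can only answer that correctly if the lookup fills in
args->valuelen. A minimal userspace illustration of that pattern (the path and
attribute name are hypothetical; this is an assumed usage sketch, not code
from this series):

	#include <sys/types.h>
	#include <sys/xattr.h>
	#include <stdio.h>

	int main(void)
	{
		/* size == 0 asks the filesystem for the value length only */
		ssize_t len = getxattr("/mnt/test/file", "user.comment", NULL, 0);

		if (len < 0)
			perror("getxattr");
		else
			printf("value length: %zd bytes\n", len);
		return 0;
	}
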
@@ -1649,7 +1649,7 @@ xfs_alloc_buftarg(
 {
 	xfs_buftarg_t		*btp;
 
-	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
 
 	btp->bt_mount = mp;
 	btp->bt_dev =  bdev->bd_dev;
...
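For context on the KM_NOFS additions in this and the following hunks: KM_NOFS
clears __GFP_FS from the allocation mask, so direct memory reclaim cannot
re-enter the filesystem while XFS locks are held; that re-entry is the
deadlock possibility lockdep was reporting. A paraphrased sketch of the flag
translation, modeled on fs/xfs/kmem.h of this era (treat the exact details as
an assumption):

	/* Paraphrase of the XFS KM_ flag to gfp_t mapping, not a verbatim copy. */
	static inline gfp_t
	kmem_flags_convert(xfs_km_flags_t flags)
	{
		gfp_t	lflags;

		if (flags & KM_NOSLEEP) {
			lflags = GFP_ATOMIC | __GFP_NOWARN;
		} else {
			lflags = GFP_KERNEL | __GFP_NOWARN;
			if (flags & KM_NOFS)
				lflags &= ~__GFP_FS;	/* no fs reclaim recursion */
		}
		return lflags;
	}
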
@@ -270,6 +270,7 @@ xfs_da3_node_read_verify(
 				break;
 			return;
 		case XFS_ATTR_LEAF_MAGIC:
+		case XFS_ATTR3_LEAF_MAGIC:
 			bp->b_ops = &xfs_attr3_leaf_buf_ops;
 			bp->b_ops->verify_read(bp);
 			return;
@@ -2464,7 +2465,8 @@ xfs_buf_map_from_irec(
 	ASSERT(nirecs >= 1);
 
 	if (nirecs > 1) {
-		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
+		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
+				  KM_SLEEP | KM_NOFS);
 		if (!map)
 			return ENOMEM;
 		*mapp = map;
@@ -2520,7 +2522,8 @@ xfs_dabuf_map(
 	 * Optimize the one-block case.
 	 */
 	if (nfsb != 1)
-		irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
+		irecs = kmem_zalloc(sizeof(irec) * nfsb,
+				    KM_SLEEP | KM_NOFS);
 
 	nirecs = nfsb;
 	error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
...
@@ -1336,7 +1336,7 @@ xfs_dir2_leaf_getdents(
 					       mp->m_sb.sb_blocksize);
 	map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
 				(length * sizeof(struct xfs_bmbt_irec)),
-			       KM_SLEEP);
+			       KM_SLEEP | KM_NOFS);
 	map_info->map_size = length;
 
 	/*
...
@@ -305,11 +305,12 @@ xfs_efi_release(xfs_efi_log_item_t	*efip,
 {
 	ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
 	if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) {
-		__xfs_efi_release(efip);
-
 		/* recovery needs us to drop the EFI reference, too */
 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
 			__xfs_efi_release(efip);
+
+		__xfs_efi_release(efip);
+		/* efip may now have been freed, do not reference it again. */
 	}
 }
...
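The EFI reordering above is the classic "drop your own reference last" rule:
as soon as a put may free the object, nothing is allowed to touch it
afterwards. A self-contained sketch of the same pattern in plain C
(hypothetical types and names, not XFS code):

	#include <stdlib.h>

	struct obj {
		int	refcount;
		int	recovered;
	};

	/* Drops one reference; frees the object when the count reaches zero. */
	static void put_obj(struct obj *o)
	{
		if (--o->refcount == 0)
			free(o);
	}

	static void release(struct obj *o)
	{
		/*
		 * Buggy ordering: if put_obj() freed 'o' here first, the
		 * flag test below would read freed memory:
		 *
		 *	put_obj(o);
		 *	if (o->recovered)	<- use after free
		 *		put_obj(o);
		 */

		/* Fixed ordering: read the flag first, drop our reference last. */
		if (o->recovered)
			put_obj(o);
		put_obj(o);
		/* 'o' may now have been freed; do not reference it again. */
	}

	int main(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		o->refcount = 2;	/* one normal ref plus one recovery ref */
		o->recovered = 1;
		release(o);		/* drops both; 'o' is gone afterwards */
		return 0;
	}
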
@@ -139,7 +139,7 @@ xlog_cil_prepare_log_vecs(
 
 		new_lv = kmem_zalloc(sizeof(*new_lv) +
 				niovecs * sizeof(struct xfs_log_iovec),
-				KM_SLEEP);
+				KM_SLEEP|KM_NOFS);
 
 		/* The allocated iovec region lies beyond the log vector. */
 		new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
...
@@ -1453,7 +1453,7 @@ xfs_free_file_space(
 	xfs_mount_t		*mp;
 	int			nimap;
 	uint			resblks;
-	uint			rounding;
+	xfs_off_t		rounding;
 	int			rt;
 	xfs_fileoff_t		startoffset_fsb;
 	xfs_trans_t		*tp;
@@ -1482,7 +1482,7 @@ xfs_free_file_space(
 		inode_dio_wait(VFS_I(ip));
 	}
 
-	rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
 	ioffset = offset & ~(rounding - 1);
 	error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 					      ioffset, -1);
...
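The rounding type change is subtle enough to deserve a worked example. With a
32-bit uint, ~(rounding - 1) is computed in 32 bits and then zero-extended, so
the AND silently clears bits 32-63 of the 64-bit offset: any offset at or
above 4 GiB is rounded to the wrong place. A standalone userspace
demonstration of the arithmetic (a sketch, not XFS code):

	#include <stdio.h>

	typedef long long xfs_off_t;	/* stand-in for the kernel's 64-bit xfs_off_t */

	int main(void)
	{
		xfs_off_t	offset = 0x123456789000LL;	/* an offset above 4GiB */
		unsigned int	rounding32 = 4096;		/* old: uint rounding */
		xfs_off_t	rounding64 = 4096;		/* new: xfs_off_t rounding */

		/*
		 * ~(rounding32 - 1) is 0xfffff000 in 32 bits; zero-extension
		 * then clears the offset's upper 32 bits as well.
		 */
		printf("uint mask:      0x%llx\n",
		       (unsigned long long)(offset & ~(rounding32 - 1)));
		/* prints 0x56789000 - the upper bits of the offset are lost */

		/* With a 64-bit rounding value the mask stays all-ones on top. */
		printf("xfs_off_t mask: 0x%llx\n",
		       (unsigned long long)(offset & ~(rounding64 - 1)));
		/* prints 0x123456789000 - only the low 12 bits are cleared */
		return 0;
	}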