Commit 24e17b5f authored by Nathan Scott, committed by Christoph Hellwig

[XFS] Use the right offset when ensuring a delayed allocate conversion has...

[XFS] Use the right offset when ensuring a delayed allocate conversion has covered the offset originally requested.  Can cause data corruption when multiple processes are performing writeout on different areas of the same file.  Quite difficult to hit though.

SGI Modid: xfs-linux:xfs-kern:22377a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Christoph Hellwig <hch@sgi.com>
.
parent 775bf6c9
...@@ -558,6 +558,7 @@ xfs_submit_page( ...@@ -558,6 +558,7 @@ xfs_submit_page(
int i; int i;
BUG_ON(PageWriteback(page)); BUG_ON(PageWriteback(page));
if (bh_count)
set_page_writeback(page); set_page_writeback(page);
if (clear_dirty) if (clear_dirty)
clear_page_dirty(page); clear_page_dirty(page);
...@@ -578,9 +579,6 @@ xfs_submit_page( ...@@ -578,9 +579,6 @@ xfs_submit_page(
if (probed_page && clear_dirty) if (probed_page && clear_dirty)
wbc->nr_to_write--; /* Wrote an "extra" page */ wbc->nr_to_write--; /* Wrote an "extra" page */
} else {
end_page_writeback(page);
wbc->pages_skipped++; /* We didn't write this page */
} }
} }
...@@ -602,21 +600,26 @@ xfs_convert_page( ...@@ -602,21 +600,26 @@ xfs_convert_page(
{ {
struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
xfs_iomap_t *mp = iomapp, *tmp; xfs_iomap_t *mp = iomapp, *tmp;
unsigned long end, offset; unsigned long offset, end_offset;
pgoff_t end_index; int index = 0;
int i = 0, index = 0;
int bbits = inode->i_blkbits; int bbits = inode->i_blkbits;
int len, page_dirty;
end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
if (page->index < end_index) {
end = PAGE_CACHE_SIZE; /*
} else { * page_dirty is initially a count of buffers on the page before
end = i_size_read(inode) & (PAGE_CACHE_SIZE-1); * EOF and is decrememted as we move each into a cleanable state.
} */
len = 1 << inode->i_blkbits;
end_offset = max(end_offset, PAGE_CACHE_SIZE);
end_offset = roundup(end_offset, len);
page_dirty = end_offset / len;
offset = 0;
bh = head = page_buffers(page); bh = head = page_buffers(page);
do { do {
offset = i << bbits; if (offset >= end_offset)
if (offset >= end)
break; break;
if (!(PageUptodate(page) || buffer_uptodate(bh))) if (!(PageUptodate(page) || buffer_uptodate(bh)))
continue; continue;
...@@ -625,6 +628,7 @@ xfs_convert_page( ...@@ -625,6 +628,7 @@ xfs_convert_page(
if (startio) { if (startio) {
lock_buffer(bh); lock_buffer(bh);
bh_arr[index++] = bh; bh_arr[index++] = bh;
page_dirty--;
} }
continue; continue;
} }
...@@ -657,10 +661,11 @@ xfs_convert_page( ...@@ -657,10 +661,11 @@ xfs_convert_page(
unlock_buffer(bh); unlock_buffer(bh);
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
} }
} while (i++, (bh = bh->b_this_page) != head); page_dirty--;
} while (offset += len, (bh = bh->b_this_page) != head);
if (startio) { if (startio && index) {
xfs_submit_page(page, wbc, bh_arr, index, 1, index == i); xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
} else { } else {
unlock_page(page); unlock_page(page);
} }
...@@ -743,19 +748,22 @@ xfs_page_state_convert( ...@@ -743,19 +748,22 @@ xfs_page_state_convert(
} }
} }
offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
end_offset = min_t(unsigned long long, end_offset = min_t(unsigned long long,
offset + PAGE_CACHE_SIZE, i_size_read(inode)); (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
bh = head = page_buffers(page);
iomp = NULL;
/* /*
* page_dirty is initially a count of buffers on the page and * page_dirty is initially a count of buffers on the page before
* is decrememted as we move each into a cleanable state. * EOF and is decrememted as we move each into a cleanable state.
*/ */
len = bh->b_size; len = 1 << inode->i_blkbits;
page_dirty = PAGE_CACHE_SIZE / len; p_offset = max(p_offset, PAGE_CACHE_SIZE);
p_offset = roundup(p_offset, len);
page_dirty = p_offset / len;
iomp = NULL;
p_offset = 0;
bh = head = page_buffers(page);
do { do {
if (offset >= end_offset) if (offset >= end_offset)
...@@ -877,8 +885,10 @@ xfs_page_state_convert( ...@@ -877,8 +885,10 @@ xfs_page_state_convert(
if (uptodate && bh == head) if (uptodate && bh == head)
SetPageUptodate(page); SetPageUptodate(page);
if (startio) if (startio) {
xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1); WARN_ON(page_dirty);
xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
}
if (iomp) { if (iomp) {
offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >> offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
......
...@@ -308,7 +308,8 @@ xfs_iomap( ...@@ -308,7 +308,8 @@ xfs_iomap(
break; break;
} }
error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps); error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count,
&imap, &nimaps);
break; break;
case BMAPI_UNWRITTEN: case BMAPI_UNWRITTEN:
lockmode = 0; lockmode = 0;
...@@ -746,6 +747,8 @@ xfs_iomap_write_delay( ...@@ -746,6 +747,8 @@ xfs_iomap_write_delay(
int int
xfs_iomap_write_allocate( xfs_iomap_write_allocate(
xfs_inode_t *ip, xfs_inode_t *ip,
loff_t offset,
size_t count,
xfs_bmbt_irec_t *map, xfs_bmbt_irec_t *map,
int *retmap) int *retmap)
{ {
...@@ -770,9 +773,9 @@ xfs_iomap_write_allocate( ...@@ -770,9 +773,9 @@ xfs_iomap_write_allocate(
if ((error = XFS_QM_DQATTACH(mp, ip, 0))) if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return XFS_ERROR(error); return XFS_ERROR(error);
offset_fsb = map->br_startoff; offset_fsb = XFS_B_TO_FSBT(mp, offset);
count_fsb = map->br_blockcount; count_fsb = map->br_blockcount;
map_start_fsb = offset_fsb; map_start_fsb = map->br_startoff;
XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
...@@ -868,8 +871,8 @@ xfs_iomap_write_allocate( ...@@ -868,8 +871,8 @@ xfs_iomap_write_allocate(
imap[i].br_startoff, imap[i].br_startoff,
imap[i].br_blockcount,imap[i].br_state); imap[i].br_blockcount,imap[i].br_state);
} }
if ((map->br_startoff >= imap[i].br_startoff) && if ((offset_fsb >= imap[i].br_startoff) &&
(map->br_startoff < (imap[i].br_startoff + (offset_fsb < (imap[i].br_startoff +
imap[i].br_blockcount))) { imap[i].br_blockcount))) {
*map = imap[i]; *map = imap[i];
*retmap = 1; *retmap = 1;
...@@ -883,9 +886,8 @@ xfs_iomap_write_allocate( ...@@ -883,9 +886,8 @@ xfs_iomap_write_allocate(
* file, just surrounding data, try again. * file, just surrounding data, try again.
*/ */
nimaps--; nimaps--;
offset_fsb = imap[nimaps].br_startoff + map_start_fsb = imap[nimaps].br_startoff +
imap[nimaps].br_blockcount; imap[nimaps].br_blockcount;
map_start_fsb = offset_fsb;
} }
trans_cancel: trans_cancel:
......
...@@ -29,9 +29,6 @@ ...@@ -29,9 +29,6 @@
* *
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/ */
#ifndef __XFS_IOMAP_H__ #ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__ #define __XFS_IOMAP_H__
...@@ -100,7 +97,7 @@ extern int xfs_iomap_write_direct(struct xfs_inode *, loff_t, size_t, ...@@ -100,7 +97,7 @@ extern int xfs_iomap_write_direct(struct xfs_inode *, loff_t, size_t,
int, struct xfs_bmbt_irec *, int *, int); int, struct xfs_bmbt_irec *, int *, int);
extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t, int, extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t, int,
struct xfs_bmbt_irec *, int *); struct xfs_bmbt_irec *, int *);
extern int xfs_iomap_write_allocate(struct xfs_inode *, extern int xfs_iomap_write_allocate(struct xfs_inode *, loff_t, size_t,
struct xfs_bmbt_irec *, int *); struct xfs_bmbt_irec *, int *);
extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t); extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t);
......
...@@ -216,7 +216,8 @@ typedef int (*xfs_iomap_write_delay_t)( ...@@ -216,7 +216,8 @@ typedef int (*xfs_iomap_write_delay_t)(
void *, loff_t, size_t, int, void *, loff_t, size_t, int,
struct xfs_bmbt_irec *, int *); struct xfs_bmbt_irec *, int *);
typedef int (*xfs_iomap_write_allocate_t)( typedef int (*xfs_iomap_write_allocate_t)(
void *, struct xfs_bmbt_irec *, int *); void *, loff_t, size_t,
struct xfs_bmbt_irec *, int *);
typedef int (*xfs_iomap_write_unwritten_t)( typedef int (*xfs_iomap_write_unwritten_t)(
void *, loff_t, size_t); void *, loff_t, size_t);
typedef uint (*xfs_lck_map_shared_t)(void *); typedef uint (*xfs_lck_map_shared_t)(void *);
...@@ -258,9 +259,9 @@ typedef struct xfs_ioops { ...@@ -258,9 +259,9 @@ typedef struct xfs_ioops {
#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \ #define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_delay) \ (*(mp)->m_io_ops.xfs_iomap_write_delay) \
((io)->io_obj, offset, count, flags, mval, nmap) ((io)->io_obj, offset, count, flags, mval, nmap)
#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \ #define XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_allocate) \ (*(mp)->m_io_ops.xfs_iomap_write_allocate) \
((io)->io_obj, mval, nmap) ((io)->io_obj, offset, count, mval, nmap)
#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \ #define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \ (*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
((io)->io_obj, offset, count) ((io)->io_obj, offset, count)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment