Commit c40ea741 authored by Christoph Hellwig, committed by Lachlan McIlroy

[XFS] kill superfluous buffer locking

There is no need to lock any page in xfs_buf.c because we operate on our
own address_space and all locking is covered by the buffer semaphore. If
we ever switch back to the main blockdevice address_space as suggested e.g.
for fsblock with a similar scheme, the locking will have to be totally
revised anyway, because the current scheme is neither correct nor coherent
with itself.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29845a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent 0771fb45
...@@ -409,6 +409,7 @@ _xfs_buf_lookup_pages( ...@@ -409,6 +409,7 @@ _xfs_buf_lookup_pages(
congestion_wait(WRITE, HZ/50); congestion_wait(WRITE, HZ/50);
goto retry; goto retry;
} }
unlock_page(page);
XFS_STATS_INC(xb_page_found); XFS_STATS_INC(xb_page_found);
...@@ -418,10 +419,7 @@ _xfs_buf_lookup_pages( ...@@ -418,10 +419,7 @@ _xfs_buf_lookup_pages(
ASSERT(!PagePrivate(page)); ASSERT(!PagePrivate(page));
if (!PageUptodate(page)) { if (!PageUptodate(page)) {
page_count--; page_count--;
if (blocksize >= PAGE_CACHE_SIZE) { if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
if (flags & XBF_READ)
bp->b_locked = 1;
} else if (!PagePrivate(page)) {
if (test_page_region(page, offset, nbytes)) if (test_page_region(page, offset, nbytes))
page_count++; page_count++;
} }
...@@ -431,11 +429,6 @@ _xfs_buf_lookup_pages( ...@@ -431,11 +429,6 @@ _xfs_buf_lookup_pages(
offset = 0; offset = 0;
} }
if (!bp->b_locked) {
for (i = 0; i < bp->b_page_count; i++)
unlock_page(bp->b_pages[i]);
}
if (page_count == bp->b_page_count) if (page_count == bp->b_page_count)
bp->b_flags |= XBF_DONE; bp->b_flags |= XBF_DONE;
...@@ -752,7 +745,6 @@ xfs_buf_associate_memory( ...@@ -752,7 +745,6 @@ xfs_buf_associate_memory(
bp->b_pages[i] = mem_to_page((void *)pageaddr); bp->b_pages[i] = mem_to_page((void *)pageaddr);
pageaddr += PAGE_CACHE_SIZE; pageaddr += PAGE_CACHE_SIZE;
} }
bp->b_locked = 0;
bp->b_count_desired = len; bp->b_count_desired = len;
bp->b_buffer_length = buflen; bp->b_buffer_length = buflen;
...@@ -1099,25 +1091,13 @@ xfs_buf_iostart( ...@@ -1099,25 +1091,13 @@ xfs_buf_iostart(
return status; return status;
} }
STATIC_INLINE int
_xfs_buf_iolocked(
xfs_buf_t *bp)
{
ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
if (bp->b_flags & XBF_READ)
return bp->b_locked;
return 0;
}
STATIC_INLINE void STATIC_INLINE void
_xfs_buf_ioend( _xfs_buf_ioend(
xfs_buf_t *bp, xfs_buf_t *bp,
int schedule) int schedule)
{ {
if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
bp->b_locked = 0;
xfs_buf_ioend(bp, schedule); xfs_buf_ioend(bp, schedule);
}
} }
STATIC void STATIC void
...@@ -1148,10 +1128,6 @@ xfs_buf_bio_end_io( ...@@ -1148,10 +1128,6 @@ xfs_buf_bio_end_io(
if (--bvec >= bio->bi_io_vec) if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags); prefetchw(&bvec->bv_page->flags);
if (_xfs_buf_iolocked(bp)) {
unlock_page(page);
}
} while (bvec >= bio->bi_io_vec); } while (bvec >= bio->bi_io_vec);
_xfs_buf_ioend(bp, 1); _xfs_buf_ioend(bp, 1);
...@@ -1162,13 +1138,12 @@ STATIC void ...@@ -1162,13 +1138,12 @@ STATIC void
_xfs_buf_ioapply( _xfs_buf_ioapply(
xfs_buf_t *bp) xfs_buf_t *bp)
{ {
int i, rw, map_i, total_nr_pages, nr_pages; int rw, map_i, total_nr_pages, nr_pages;
struct bio *bio; struct bio *bio;
int offset = bp->b_offset; int offset = bp->b_offset;
int size = bp->b_count_desired; int size = bp->b_count_desired;
sector_t sector = bp->b_bn; sector_t sector = bp->b_bn;
unsigned int blocksize = bp->b_target->bt_bsize; unsigned int blocksize = bp->b_target->bt_bsize;
int locking = _xfs_buf_iolocked(bp);
total_nr_pages = bp->b_page_count; total_nr_pages = bp->b_page_count;
map_i = 0; map_i = 0;
...@@ -1191,7 +1166,7 @@ _xfs_buf_ioapply( ...@@ -1191,7 +1166,7 @@ _xfs_buf_ioapply(
* filesystem block size is not smaller than the page size. * filesystem block size is not smaller than the page size.
*/ */
if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
(bp->b_flags & XBF_READ) && locking && (bp->b_flags & XBF_READ) &&
(blocksize >= PAGE_CACHE_SIZE)) { (blocksize >= PAGE_CACHE_SIZE)) {
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
...@@ -1208,24 +1183,6 @@ _xfs_buf_ioapply( ...@@ -1208,24 +1183,6 @@ _xfs_buf_ioapply(
goto submit_io; goto submit_io;
} }
/* Lock down the pages which we need to for the request */
if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
for (i = 0; size; i++) {
int nbytes = PAGE_CACHE_SIZE - offset;
struct page *page = bp->b_pages[i];
if (nbytes > size)
nbytes = size;
lock_page(page);
size -= nbytes;
offset = 0;
}
offset = bp->b_offset;
size = bp->b_count_desired;
}
next_chunk: next_chunk:
atomic_inc(&bp->b_io_remaining); atomic_inc(&bp->b_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
......
...@@ -143,7 +143,6 @@ typedef struct xfs_buf { ...@@ -143,7 +143,6 @@ typedef struct xfs_buf {
void *b_fspriv2; void *b_fspriv2;
void *b_fspriv3; void *b_fspriv3;
unsigned short b_error; /* error code on I/O */ unsigned short b_error; /* error code on I/O */
unsigned short b_locked; /* page array is locked */
unsigned int b_page_count; /* size of page array */ unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */ unsigned int b_offset; /* page offset in first page */
struct page **b_pages; /* array of page pointers */ struct page **b_pages; /* array of page pointers */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment