Commit 479d47e3 authored by Nathan Scott, committed by Christoph Hellwig

[XFS] pagebuf can now take a configurable sector size (512 -> 32K).

SGI Modid: 2.5.x-xfs:slinx:132942a
parent 71247917
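The change teaches each pagebuf target its device sector size instead of assuming 512 bytes: the new xfs_size_buftarg() records the block size plus a sector shift and mask, the superblock gains sb_logsectlog/sb_logsectsize fields with matching validation limits, and sector-to-basic-block conversion macros replace hard-coded 512-byte assumptions. As a quick standalone illustration (userspace C, not the kernel code in the diff below), this is the shift/mask derivation that xfs_size_buftarg() applies to a power-of-two sector size:

/* Standalone sketch of the sector shift/mask derivation used by
 * xfs_size_buftarg() in this commit; not the kernel code itself. */
#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int sectorsize;

        /* Supported XFS sector sizes after this change: 512 bytes up to 32K. */
        for (sectorsize = 512; sectorsize <= 32768; sectorsize <<= 1) {
                unsigned int sshift = ffs(sectorsize) - 1;  /* log2 of the size */
                unsigned int smask  = sectorsize - 1;       /* low-bit alignment mask */

                printf("sectorsize %5u -> shift %2u mask 0x%04x\n",
                       sectorsize, sshift, smask);
        }
        return 0;
}

Any buffer address or I/O range can then be checked for sector alignment by masking with smask, which is what the new BUG_ON checks in _pagebuf_find() do.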
@@ -512,6 +512,23 @@ xfs_free_buftarg(
 	kfree(btp);
 }
 
+void
+xfs_size_buftarg(
+	xfs_buftarg_t		*btp,
+	unsigned int		blocksize,
+	unsigned int		sectorsize)
+{
+	btp->pbr_bsize = blocksize;
+	btp->pbr_sshift = ffs(sectorsize) - 1;
+	btp->pbr_smask = sectorsize - 1;
+
+	if (set_blocksize(btp->pbr_bdev, sectorsize)) {
+		printk(KERN_WARNING
+			"XFS: Cannot set_blocksize to %u on device 0x%x\n",
+			sectorsize, btp->pbr_dev);
+	}
+}
+
 xfs_buftarg_t *
 xfs_alloc_buftarg(
 	struct block_device *bdev)
@@ -523,7 +540,7 @@ xfs_alloc_buftarg(
 	btp->pbr_dev = bdev->bd_dev;
 	btp->pbr_bdev = bdev;
 	btp->pbr_mapping = bdev->bd_inode->i_mapping;
-	btp->pbr_blocksize = PAGE_CACHE_SIZE;
+	xfs_size_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev));
 
 	switch (MAJOR(btp->pbr_dev)) {
 	case MD_MAJOR:
......
@@ -87,6 +87,7 @@ extern int xfs_blkdev_get (const char *, struct block_device **);
 extern void xfs_blkdev_put (struct block_device *);
 
 extern struct pb_target *xfs_alloc_buftarg (struct block_device *);
+extern void xfs_size_buftarg (struct pb_target *, unsigned int, unsigned int);
 extern void xfs_free_buftarg (struct pb_target *);
 
 #endif	/* __XFS_SUPER_H__ */
@@ -63,10 +63,8 @@
 #include "page_buf_internal.h"
 
-#define SECTOR_SHIFT	9
-#define SECTOR_SIZE	(1<<SECTOR_SHIFT)
-#define SECTOR_MASK	(SECTOR_SIZE - 1)
-#define BN_ALIGN_MASK	((1 << (PAGE_CACHE_SHIFT - SECTOR_SHIFT)) - 1)
+#define BBSHIFT		9
+#define BN_ALIGN_MASK	((1 << (PAGE_CACHE_SHIFT - BBSHIFT)) - 1)
 
 #ifndef GFP_READAHEAD
 #define GFP_READAHEAD	0
@@ -471,7 +469,8 @@ _pagebuf_lookup_pages(
 	struct page		*page;
 	int			gfp_mask, retry_count = 5, rval = 0;
 	int			all_mapped, good_pages, nbytes;
-	size_t			blocksize, size, offset;
+	unsigned int		blocksize, sectorshift;
+	size_t			size, offset;
 
 	/* For pagebufs where we want to map an address, do not use
@@ -517,7 +516,8 @@ _pagebuf_lookup_pages(
 		return rval;
 
 	rval = pi = 0;
-	blocksize = pb->pb_target->pbr_blocksize;
+	blocksize = pb->pb_target->pbr_bsize;
+	sectorshift = pb->pb_target->pbr_sshift;
 	size = pb->pb_count_desired;
 	offset = pb->pb_offset;
@@ -558,15 +558,15 @@ _pagebuf_lookup_pages(
 				pb->pb_locked = 1;
 			good_pages--;
 		} else if (!PagePrivate(page)) {
-			unsigned long	i, range = (offset + nbytes) >> SECTOR_SHIFT;
-
-			ASSERT(blocksize < PAGE_CACHE_SIZE);
-			ASSERT(!(pb->pb_flags & _PBF_PRIVATE_BH));
+			unsigned long	i, range;
 
 			/*
 			 * In this case page->private holds a bitmap
-			 * of uptodate sectors (512) within the page
+			 * of uptodate sectors within the page
 			 */
-			for (i = offset >> SECTOR_SHIFT; i < range; i++)
+			ASSERT(blocksize < PAGE_CACHE_SIZE);
+			range = (offset + nbytes) >> sectorshift;
+			for (i = offset >> sectorshift; i < range; i++)
 				if (!test_bit(i, &page->private))
 					break;
 			if (i != range)
@@ -651,8 +651,14 @@ _pagebuf_find(	/* find buffer for block */
 	page_buf_t		*pb;
 	int			not_locked;
 
-	range_base = (ioff << SECTOR_SHIFT);
-	range_length = (isize << SECTOR_SHIFT);
+	range_base = (ioff << BBSHIFT);
+	range_length = (isize << BBSHIFT);
+
+	/* Ensure we never do IOs smaller than the sector size */
+	BUG_ON(range_length < (1 << target->pbr_sshift));
+
+	/* Ensure we never do IOs that are not sector aligned */
+	BUG_ON(range_base & (loff_t)target->pbr_smask);
 
 	hval = _bhash(target->pbr_bdev->bd_dev, range_base);
 	h = &pbhash[hval];
@@ -977,18 +983,12 @@ pagebuf_get_no_daddr(
 		} else {
 			kfree(rmem); /* free the mem from the previous try */
 			tlen <<= 1;  /* double the size and try again */
-			/*
-			printk(
-			"pb_get_no_daddr NOT block 0x%p mask 0x%p len %d\n",
-				rmem, ((size_t)rmem & (size_t)~SECTOR_MASK),
-				len);
-			*/
 		}
 		if ((rmem = kmalloc(tlen, GFP_KERNEL)) == 0) {
 			pagebuf_free(pb);
 			return NULL;
 		}
-	} while ((size_t)rmem != ((size_t)rmem & (size_t)~SECTOR_MASK));
+	} while ((size_t)rmem != ((size_t)rmem & ~target->pbr_smask));
 
 	if ((rval = pagebuf_associate_memory(pb, rmem, len)) != 0) {
 		kfree(rmem);
@@ -1269,9 +1269,7 @@ pagebuf_iostart(	/* start I/O on a buffer */
 	pb->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI|PBF_READ_AHEAD);
 	pb->pb_flags |= flags & (PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_READ_AHEAD);
 
-	if (pb->pb_bn == PAGE_BUF_DADDR_NULL) {
-		BUG();
-	}
+	BUG_ON(pb->pb_bn == PAGE_BUF_DADDR_NULL);
 
 	/* For writes call internal function which checks for
 	 * filesystem specific callout function and execute it.
@@ -1300,7 +1298,8 @@ bio_end_io_pagebuf(
 	int			error)
 {
 	page_buf_t		*pb = (page_buf_t *)bio->bi_private;
-	unsigned int		i, blocksize = pb->pb_target->pbr_blocksize;
+	unsigned int		i, blocksize = pb->pb_target->pbr_bsize;
+	unsigned int		sectorshift = pb->pb_target->pbr_sshift;
 	struct bio_vec		*bvec = bio->bi_io_vec;
 
 	if (bio->bi_size)
@@ -1320,10 +1319,8 @@ bio_end_io_pagebuf(
 			unsigned int	j, range;
 
 			ASSERT(blocksize < PAGE_CACHE_SIZE);
-			ASSERT(!(pb->pb_flags & _PBF_PRIVATE_BH));
-
-			range = (bvec->bv_offset + bvec->bv_len)>>SECTOR_SHIFT;
-			for (j = bvec->bv_offset>>SECTOR_SHIFT; j < range; j++)
+			range = (bvec->bv_offset + bvec->bv_len) >> sectorshift;
+			for (j = bvec->bv_offset >> sectorshift; j < range; j++)
 				set_bit(j, &page->private);
 			if (page->private == (unsigned long)(PAGE_CACHE_SIZE-1))
 				SetPageUptodate(page);
@@ -1374,7 +1371,8 @@ pagebuf_iorequest(	/* start real I/O */
 	int			offset = pb->pb_offset;
 	int			size = pb->pb_count_desired;
 	sector_t		sector = pb->pb_bn;
-	size_t			blocksize = pb->pb_target->pbr_blocksize;
+	unsigned int		sectorshift = pb->pb_target->pbr_sshift;
+	unsigned int		blocksize = pb->pb_target->pbr_bsize;
 	int			locking;
 
 	locking = (pb->pb_flags & _PBF_LOCKABLE) == 0 && (pb->pb_locked == 0);
@@ -1403,7 +1401,7 @@ pagebuf_iorequest(	/* start real I/O */
 		bio = bio_alloc(GFP_NOIO, 1);
 
 		bio->bi_bdev = pb->pb_target->pbr_bdev;
-		bio->bi_sector = sector - (offset >> SECTOR_SHIFT);
+		bio->bi_sector = sector - (offset >> BBSHIFT);
 		bio->bi_end_io = bio_end_io_pagebuf;
 		bio->bi_private = pb;
 		bio->bi_vcnt++;
@@ -1448,7 +1446,7 @@ pagebuf_iorequest(	/* start real I/O */
 
 next_chunk:
 	atomic_inc(&PBP(pb)->pb_io_remaining);
-	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - SECTOR_SHIFT);
+	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
 	if (nr_pages > total_nr_pages)
 		nr_pages = total_nr_pages;
@@ -1473,7 +1471,7 @@ pagebuf_iorequest(	/* start real I/O */
 
 		offset = 0;
-		sector += nbytes >> SECTOR_SHIFT;
+		sector += nbytes >> BBSHIFT;
 		size -= nbytes;
 		total_nr_pages--;
 	}
......
@@ -136,7 +136,9 @@ typedef struct pb_target {
 	dev_t			pbr_dev;
 	struct block_device	*pbr_bdev;
 	struct address_space	*pbr_mapping;
-	unsigned int		pbr_blocksize;
+	unsigned int		pbr_bsize;
+	unsigned int		pbr_sshift;
+	size_t			pbr_smask;
 } pb_target_t;
 
 /*
......
@@ -96,16 +96,22 @@ int xfs_alloc_block_minrecs(int lev, struct xfs_btree_cur *cur);
 #endif
 
 /*
- * Minimum and maximum blocksize.
+ * Minimum and maximum blocksize and sectorsize.
  * The blocksize upper limit is pretty much arbitrary.
+ * The sectorsize upper limit is due to sizeof(sb_sectsize).
  */
 #define XFS_MIN_BLOCKSIZE_LOG	9	/* i.e. 512 bytes */
 #define XFS_MAX_BLOCKSIZE_LOG	16	/* i.e. 65536 bytes */
 #define XFS_MIN_BLOCKSIZE	(1 << XFS_MIN_BLOCKSIZE_LOG)
 #define XFS_MAX_BLOCKSIZE	(1 << XFS_MAX_BLOCKSIZE_LOG)
+#define XFS_MIN_SECTORSIZE_LOG	9	/* i.e. 512 bytes */
+#define XFS_MAX_SECTORSIZE_LOG	15	/* i.e. 32768 bytes */
+#define XFS_MIN_SECTORSIZE	(1 << XFS_MIN_SECTORSIZE_LOG)
+#define XFS_MAX_SECTORSIZE	(1 << XFS_MAX_SECTORSIZE_LOG)
 
 /*
- * block numbers in the AG; SB is BB 0, AGF is BB 1, AGI is BB 2, AGFL is BB 3
+ * Block numbers in the AG:
+ * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
  */
 #if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BNO_BLOCK)
 xfs_agblock_t xfs_bno_block(struct xfs_mount *mp);
......
@@ -91,7 +91,8 @@ static struct {
     { offsetof(xfs_sb_t, sb_unit),	0 },
     { offsetof(xfs_sb_t, sb_width),	0 },
     { offsetof(xfs_sb_t, sb_dirblklog),	0 },
-    { offsetof(xfs_sb_t, sb_dummy),	1 },
+    { offsetof(xfs_sb_t, sb_logsectlog), 0 },
+    { offsetof(xfs_sb_t, sb_logsectsize),0 },
     { offsetof(xfs_sb_t, sb_logsunit),	0 },
     { sizeof(xfs_sb_t),			0 }
 };
@@ -119,6 +120,7 @@ xfs_mount_init(void)
 	spinlock_init(&mp->m_freeze_lock, "xfs_freeze");
 	init_sv(&mp->m_wait_unfreeze, SV_DEFAULT, "xfs_freeze", 0);
 	atomic_set(&mp->m_active_trans, 0);
+	mp->m_cxfstype = XFS_CXFS_NOT;
 
 	return mp;
 }	/* xfs_mount_init */
@@ -213,13 +215,26 @@ xfs_mount_validate_sb(
 		return XFS_ERROR(EFSCORRUPTED);
 	}
 
+	if (!sbp->sb_logsectlog)
+		sbp->sb_logsectlog = sbp->sb_sectlog;
+	if (!sbp->sb_logsectsize)
+		sbp->sb_logsectsize = sbp->sb_sectsize;
+
 	/*
 	 * More sanity checking. These were stolen directly from
 	 * xfs_repair.
 	 */
-	if (sbp->sb_blocksize <= 0 ||
-	    sbp->sb_agcount <= 0 ||
-	    sbp->sb_sectsize <= 0 ||
+	if (sbp->sb_agcount <= 0 ||
+	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
+	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
+	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
+	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
+	    sbp->sb_logsectsize < XFS_MIN_SECTORSIZE ||
+	    sbp->sb_logsectsize > XFS_MAX_SECTORSIZE ||
+	    sbp->sb_logsectlog < XFS_MIN_SECTORSIZE_LOG ||
+	    sbp->sb_logsectlog > XFS_MAX_SECTORSIZE_LOG ||
+	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
+	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
 	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
 	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
 	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
@@ -232,7 +247,7 @@ xfs_mount_validate_sb(
 	}
 
 	/*
-	 * sanity check ag count, size fields against data size field
+	 * Sanity check AG count, size fields against data size field
 	 */
 	if (sbp->sb_dblocks == 0 ||
 	    sbp->sb_dblocks >
@@ -268,7 +283,8 @@ xfs_mount_validate_sb(
 			PAGE_SIZE);
 		return XFS_ERROR(EWRONGFS);
 	}
-	return (0);
+
+	return 0;
 }
 
 void
@@ -467,6 +483,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
 	mp->m_maxagi = mp->m_sb.sb_agcount;
 	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
 	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
+	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
 	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
 	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
 	mp->m_litino = sbp->sb_inodesize -
@@ -755,7 +772,9 @@ xfs_mountfs(
 			goto error1;
 		}
 		if (!noio) {
-			error = xfs_read_buf(mp, mp->m_ddev_targp, d - 1, 1, 0, &bp);
+			error = xfs_read_buf(mp, mp->m_ddev_targp,
+					d - XFS_FSS_TO_BB(mp, 1),
+					XFS_FSS_TO_BB(mp, 1), 0, &bp);
 			if (!error) {
 				xfs_buf_relse(bp);
 			} else {
@@ -775,7 +794,9 @@ xfs_mountfs(
 			error = XFS_ERROR(E2BIG);
 			goto error1;
 		}
-		error = xfs_read_buf(mp, mp->m_logdev_targp, d - 1, 1, 0, &bp);
+		error = xfs_read_buf(mp, mp->m_logdev_targp,
+				d - XFS_LOGS_TO_BB(mp, 1),
+				XFS_LOGS_TO_BB(mp, 1), 0, &bp);
 		if (!error) {
 			xfs_buf_relse(bp);
 		} else {
......
@@ -232,6 +232,7 @@ typedef struct xfs_mount {
 	__uint8_t		m_mk_sharedro;	/* mark shared ro on unmount */
 	__uint8_t		m_inode_quiesce;/* call quiesce on new inodes.
						   field governed by m_ilock */
+	__uint8_t		m_sectbb_log;	/* sectlog - BBSHIFT */
 	__uint8_t		m_dirversion;	/* 1 or 2 */
 	xfs_dirops_t		m_dirops;	/* table of dir funcs */
 	int			m_dirblksize;	/* directory block sz--bytes */
......
@@ -34,7 +34,7 @@
 /*
  * Super block
- * Fits into a 512-byte buffer at daddr_t 0 of each allocation group.
+ * Fits into a sector-sized buffer at address 0 of each allocation group.
  * Only the first of these is ever updated except during growfs.
  */
@@ -140,7 +140,8 @@ typedef struct xfs_sb
 	__uint32_t	sb_unit;	/* stripe or raid unit */
 	__uint32_t	sb_width;	/* stripe or raid width */
 	__uint8_t	sb_dirblklog;	/* log2 of dir block size (fsbs) */
-	__uint8_t	sb_dummy[3];	/* padding */
+	__uint8_t	sb_logsectlog;	/* log2 of the log sector size */
+	__uint16_t	sb_logsectsize;	/* sector size for the log, bytes */
 	__uint32_t	sb_logsunit;	/* stripe unit size for the log */
 } xfs_sb_t;
@@ -159,7 +160,7 @@ typedef enum {
 	XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
 	XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
 	XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
-	XFS_SBS_DUMMY, XFS_SBS_LOGSUNIT,
+	XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
 	XFS_SBS_FIELDCOUNT
 } xfs_sb_field_t;
@@ -474,6 +475,28 @@ xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno);
 		 XFS_FSB_TO_AGBNO(mp,fsbno))
 #endif
 
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBP)
+xfs_sb_t *xfs_buf_to_sbp(struct xfs_buf *bp);
+#define XFS_BUF_TO_SBP(bp)	xfs_buf_to_sbp(bp)
+#else
+#define XFS_BUF_TO_SBP(bp)	((xfs_sb_t *)XFS_BUF_PTR(bp))
+#endif
+
+/*
+ * File system sector to basic block conversions.
+ */
+#define XFS_FSS_TO_BB(mp,sec)	((sec) << (mp)->m_sectbb_log)
+#define XFS_LOGS_TO_BB(mp,sec)	((sec) << ((mp)->m_sb.sb_logsectlog - BBSHIFT))
+#define XFS_BB_TO_FSS(mp,bb)	\
+	(((bb) + (XFS_FSS_TO_BB(mp,1) - 1)) >> (mp)->m_sectbb_log)
+#define XFS_BB_TO_FSST(mp,bb)	((bb) >> (mp)->m_sectbb_log)
+
+/*
+ * File system sector to byte conversions.
+ */
+#define XFS_FSS_TO_B(mp,sectno)	((xfs_fsize_t)(sectno) << (mp)->m_sb.sb_sectlog)
+#define XFS_B_TO_FSST(mp,b)	(((__uint64_t)(b)) >> (mp)->m_sb.sb_sectlog)
+
 /*
  * File system block to basic block conversions.
  */
@@ -493,11 +516,4 @@ xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno);
 #define XFS_B_TO_FSBT(mp,b)	(((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
 #define XFS_B_FSB_OFFSET(mp,b)	((b) & (mp)->m_blockmask)
 
-#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBP)
-xfs_sb_t *xfs_buf_to_sbp(struct xfs_buf *bp);
-#define XFS_BUF_TO_SBP(bp)	xfs_buf_to_sbp(bp)
-#else
-#define XFS_BUF_TO_SBP(bp)	((xfs_sb_t *)XFS_BUF_PTR(bp))
-#endif
-
 #endif	/* __XFS_SB_H__ */
@@ -393,7 +393,7 @@ xfs_mount(
 	xfs_mount_t		*mp;
 	struct block_device	*ddev, *logdev, *rtdev;
 	int			ronly = (vfsp->vfs_flag & VFS_RDONLY);
-	int			error = 0;
+	int			flags = 0, error;
 
 	ddev = vfsp->vfs_super->s_bdev;
 	logdev = rtdev = NULL;
@@ -430,16 +430,10 @@ xfs_mount(
 	vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp);
 
 	mp->m_ddev_targp = xfs_alloc_buftarg(ddev);
-	if (rtdev != NULL) {
+	if (rtdev)
 		mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev);
-		set_blocksize(rtdev, 512);
-	}
-	if (logdev != NULL && logdev != ddev) {
-		mp->m_logdev_targp = xfs_alloc_buftarg(logdev);
-		set_blocksize(logdev, 512);
-	} else {
-		mp->m_logdev_targp = mp->m_ddev_targp;
-	}
+	mp->m_logdev_targp = (logdev && logdev != ddev) ?
+				xfs_alloc_buftarg(logdev) : mp->m_ddev_targp;
 
 	error = xfs_start_flags(args, mp, ronly);
 	if (error)
@@ -455,16 +449,16 @@ xfs_mount(
 		goto error;
 	}
 
-	mp->m_ddev_targp->pbr_blocksize = mp->m_sb.sb_blocksize;
-	if (logdev != 0 && logdev != ddev) {
-		mp->m_logdev_targp->pbr_blocksize = mp->m_sb.sb_blocksize;
-	}
-	if (rtdev != 0) {
-		mp->m_rtdev_targp->pbr_blocksize = mp->m_sb.sb_blocksize;
-	}
-
-	mp->m_cxfstype = XFS_CXFS_NOT;
-	error = xfs_mountfs(vfsp, mp, ddev->bd_dev, 0);
+	xfs_size_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
+			mp->m_sb.sb_sectsize);
+	if (logdev && logdev != ddev)
+		xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
+				mp->m_sb.sb_logsectsize);
+	if (rtdev)
+		xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
+				mp->m_sb.sb_blocksize);
+
+	error = xfs_mountfs(vfsp, mp, ddev->bd_dev, flags);
 	if (error)
 		goto error;
 	return 0;
......
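For reference, the sector-to-basic-block macros added in the superblock header (XFS_FSS_TO_BB, XFS_BB_TO_FSS) work off m_sectbb_log = sb_sectlog - BBSHIFT. A minimal standalone sketch of the same arithmetic, using hypothetical local variables rather than the real mount structure:

/* Standalone illustration of filesystem-sector <-> basic-block conversion;
 * variable names here are illustrative, not the kernel's mount fields. */
#include <stdio.h>

#define BBSHIFT	9	/* basic blocks are 512 bytes */

int main(void)
{
        unsigned int sectlog = 12;                      /* e.g. a 4096-byte sector */
        unsigned int sectbb_log = sectlog - BBSHIFT;    /* = 3: one sector is 8 BBs */

        /* sectors -> basic blocks, as in XFS_FSS_TO_BB(mp, sec) */
        unsigned long long secs = 5;
        unsigned long long bbs = secs << sectbb_log;

        /* basic blocks -> sectors, rounding up, as in XFS_BB_TO_FSS(mp, bb) */
        unsigned long long back = (bbs + (1ULL << sectbb_log) - 1) >> sectbb_log;

        printf("%llu sectors = %llu basic blocks = %llu sectors\n", secs, bbs, back);
        return 0;
}

With a 4096-byte sector, sectbb_log is 3, so one filesystem sector spans eight 512-byte basic blocks; this is why xfs_mountfs() now reads the last sector of the data and log devices with XFS_FSS_TO_BB(mp, 1)/XFS_LOGS_TO_BB(mp, 1) instead of a hard-coded single basic block.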