Commit 8a6a07e8 authored by Dave Kleikamp

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/repos/j/jfs/linux-2.5
parents 1c400de9 53723704
@@ -65,11 +65,13 @@ static int jfs_open(struct inode *inode, struct file *file)
     if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE &&
         (inode->i_size == 0)) {
         struct jfs_inode_info *ji = JFS_IP(inode);
+        spin_lock_irq(&ji->ag_lock);
         if (ji->active_ag == -1) {
             ji->active_ag = ji->agno;
             atomic_inc(
                 &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]);
         }
+        spin_unlock_irq(&ji->ag_lock);
     }
     return 0;
@@ -78,11 +80,13 @@ static int jfs_release(struct inode *inode, struct file *file)
 {
     struct jfs_inode_info *ji = JFS_IP(inode);
+    spin_lock_irq(&ji->ag_lock);
     if (ji->active_ag != -1) {
         struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
         atomic_dec(&bmap->db_active[ji->active_ag]);
         ji->active_ag = -1;
     }
+    spin_unlock_irq(&ji->ag_lock);
     return 0;
 }
...
@@ -1204,6 +1204,12 @@ static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
     s8 *leaf;
     u32 mask;
+    if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
+        jfs_error(bmp->db_ipbmap->i_sb,
+                  "dbAllocNext: Corrupt dmap page");
+        return -EIO;
+    }
     /* pick up a pointer to the leaves of the dmap tree.
      */
     leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
@@ -1327,7 +1333,15 @@ dbAllocNear(struct bmap * bmp,
         struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
 {
     int word, lword, rc;
-    s8 *leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
+    s8 *leaf;
+    if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
+        jfs_error(bmp->db_ipbmap->i_sb,
+                  "dbAllocNear: Corrupt dmap page");
+        return -EIO;
+    }
+    leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
     /* determine the word within the dmap that holds the hint
      * (i.e. blkno). also, determine the last word in the dmap
@@ -1489,6 +1503,13 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
     dcp = (struct dmapctl *) mp->data;
     budmin = dcp->budmin;
+    if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
+        jfs_error(bmp->db_ipbmap->i_sb,
+                  "dbAllocAG: Corrupt dmapctl page");
+        release_metapage(mp);
+        return -EIO;
+    }
     /* search the subtree(s) of the dmap control page that describes
      * the allocation group, looking for sufficient free space. to begin,
      * determine how many allocation groups are represented in a dmap
@@ -1697,6 +1718,13 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
     dcp = (struct dmapctl *) mp->data;
     budmin = dcp->budmin;
+    if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
+        jfs_error(bmp->db_ipbmap->i_sb,
+                  "dbFindCtl: Corrupt dmapctl page");
+        release_metapage(mp);
+        return -EIO;
+    }
     /* search the tree within the dmap control page for
      * sufficent free space. if sufficient free space is found,
      * dbFindLeaf() returns the index of the leaf at which
@@ -2459,6 +2487,13 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
         return -EIO;
     dcp = (struct dmapctl *) mp->data;
+    if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
+        jfs_error(bmp->db_ipbmap->i_sb,
+                  "dbAdjCtl: Corrupt dmapctl page");
+        release_metapage(mp);
+        return -EIO;
+    }
     /* determine the leaf number corresponding to the block and
      * the index within the dmap control tree.
      */
...
@@ -553,6 +553,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
     if (S_ISREG(ip->i_mode) && (ji->fileset == FILESYSTEM_I)) {
         ag = BLKTOAG(daddr, sbi);
+        spin_lock_irq(&ji->ag_lock);
         if (ji->active_ag == -1) {
             atomic_inc(&bmp->db_active[ag]);
             ji->active_ag = ag;
@@ -561,6 +562,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
             atomic_inc(&bmp->db_active[ag]);
             ji->active_ag = ag;
         }
+        spin_unlock_irq(&ji->ag_lock);
     }
     return (0);
...
@@ -1280,6 +1280,7 @@ int diFree(struct inode *ip)
      * to be freed by the transaction;
      */
     tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
+    down(&JFS_IP(ipimap)->commit_sem);
     /* acquire tlock of the iag page of the freed ixad
      * to force the page NOHOMEOK (even though no data is
@@ -1312,6 +1313,7 @@ int diFree(struct inode *ip)
     rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
     txEnd(tid);
+    up(&JFS_IP(ipimap)->commit_sem);
     /* unlock the AG inode map information */
     AG_UNLOCK(imap, agno);
@@ -2622,10 +2624,13 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
      */
 #endif  /* _STILL_TO_PORT */
     tid = txBegin(sb, COMMIT_FORCE);
+    down(&JFS_IP(ipimap)->commit_sem);
     /* update the inode map addressing structure to point to it */
     if ((rc =
          xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) {
+        txEnd(tid);
+        up(&JFS_IP(ipimap)->commit_sem);
         /* Free the blocks allocated for the iag since it was
          * not successfully added to the inode map
          */
@@ -2650,6 +2655,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
     rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
     txEnd(tid);
+    up(&JFS_IP(ipimap)->commit_sem);
     duplicateIXtree(sb, blkno, xlen, &xaddr);
...
@@ -53,6 +53,7 @@ struct jfs_inode_info {
     lid_t blid;         /* lid of pseudo buffer? */
     lid_t atlhead;      /* anonymous tlock list head */
     lid_t atltail;      /* anonymous tlock list tail */
+    spinlock_t ag_lock; /* protects active_ag */
     struct list_head anon_inode_list;   /* inodes having anonymous txns */
     /*
      * rdwrlock serializes xtree between reads & writes and synchronizes
...
@@ -225,8 +225,16 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
     if (absolute)
         mapping = inode->i_sb->s_bdev->bd_inode->i_mapping;
-    else
+    else {
+        /*
+         * If an nfs client tries to read an inode that is larger
+         * than any existing inodes, we may try to read past the
+         * end of the inode map
+         */
+        if ((lblock << inode->i_blkbits) >= inode->i_size)
+            return NULL;
         mapping = inode->i_mapping;
+    }
     hash_ptr = meta_hash(mapping, lblock);
 again:
...
@@ -1071,8 +1071,10 @@ xtSplitUp(tid_t tid,
          */
         /* get/pin the parent page <sp> */
         XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
-        if (rc)
-            goto errout2;
+        if (rc) {
+            XT_PUTPAGE(rcmp);
+            return rc;
+        }
         /*
          * The new key entry goes ONE AFTER the index of parent entry,
@@ -1106,8 +1108,10 @@ xtSplitUp(tid_t tid,
         rc = (sp->header.flag & BT_ROOT) ?
             xtSplitRoot(tid, ip, split, &rmp) :
             xtSplitPage(tid, ip, split, &rmp, &rbn);
-        if (rc)
-            goto errout1;
+        if (rc) {
+            XT_PUTPAGE(smp);
+            return rc;
+        }
         XT_PUTPAGE(smp);
         /* keep new child page <rp> pinned */
@@ -1170,19 +1174,6 @@ xtSplitUp(tid_t tid,
     XT_PUTPAGE(rmp);
     return 0;
-    /*
-     * If something fails in the above loop we were already walking back
-     * up the tree and the tree is now inconsistent.
-     * release all pages we're holding.
-     */
-  errout1:
-    XT_PUTPAGE(smp);
-  errout2:
-    XT_PUTPAGE(rcmp);
-    return rc;
 }
@@ -3504,7 +3495,17 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
      * a page that was formerly to the right, let's make sure that the
      * next pointer is zero.
      */
-    p->header.next = 0;
+    if (p->header.next) {
+        if (log)
+            /*
+             * Make sure this change to the header is logged.
+             * If we really truncate this leaf, the flag
+             * will be changed to tlckTRUNCATE
+             */
+            tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
+        BT_MARK_DIRTY(mp, ip);
+        p->header.next = 0;
+    }
     freed = 0;
...
@@ -141,10 +141,13 @@ static void jfs_destroy_inode(struct inode *inode)
 {
     struct jfs_inode_info *ji = JFS_IP(inode);
+    spin_lock_irq(&ji->ag_lock);
     if (ji->active_ag != -1) {
         struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
         atomic_dec(&bmap->db_active[ji->active_ag]);
+        ji->active_ag = -1;
     }
+    spin_unlock_irq(&ji->ag_lock);
 #ifdef CONFIG_JFS_POSIX_ACL
     if (ji->i_acl != JFS_ACL_NOT_CACHED) {
@@ -559,6 +562,7 @@ static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
     init_rwsem(&jfs_ip->rdwrlock);
     init_MUTEX(&jfs_ip->commit_sem);
     init_rwsem(&jfs_ip->xattr_sem);
+    spin_lock_init(&jfs_ip->ag_lock);
     jfs_ip->active_ag = -1;
 #ifdef CONFIG_JFS_POSIX_ACL
     jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
...