Commit 4541d16c authored by Fred Isaman, committed by Trond Myklebust

pnfs: change how lsegs are removed from layout list

This is to prepare the way for sensible io draining.  Instead of just
removing the lseg from the list, we clear the VALID flag (preventing
new io from grabbing references to the lseg) and drop the reference
that holds it in the list.  The lseg is thus removed once any io in
progress completes and any remaining references are dropped.
Signed-off-by: Fred Isaman <iisaman@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent fd6002e9
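The pattern this patch moves to can be illustrated outside the kernel. The sketch below is not the patched code itself: the names (struct seg, seg_get, seg_put, seg_invalidate) are invented for illustration, and C11 atomics stand in for the kernel's atomic_t, bitops, and the i_lock serialization.

/*
 * Minimal userspace sketch of "invalidate, then drop the list's reference":
 * an object on a list holds one reference on behalf of that list.
 * Invalidating it clears a VALID flag (so no new users can grab it) and
 * drops the list's reference; the object is freed only when the last
 * in-flight user drops its own reference.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct seg {
        atomic_int  refcount;   /* one reference is owned by "the list" */
        atomic_bool valid;      /* cleared when the segment is invalidated */
};

static struct seg *seg_alloc(void)
{
        struct seg *s = malloc(sizeof(*s));

        atomic_init(&s->refcount, 1);   /* the list's reference */
        atomic_init(&s->valid, true);
        return s;
}

/*
 * New io may only take a reference while the segment is still valid.
 * (In the kernel, this check-and-get happens under the inode spinlock,
 * which is what makes it race-free; the sketch elides that.)
 */
static bool seg_get(struct seg *s)
{
        if (!atomic_load(&s->valid))
                return false;
        atomic_fetch_add(&s->refcount, 1);
        return true;
}

static void seg_put(struct seg *s)
{
        if (atomic_fetch_sub(&s->refcount, 1) == 1) {
                printf("last reference dropped, freeing segment\n");
                free(s);
        }
}

/* Invalidate: block new users, then drop the reference held by the list. */
static void seg_invalidate(struct seg *s)
{
        if (atomic_exchange(&s->valid, false))
                seg_put(s);
}

int main(void)
{
        struct seg *s = seg_alloc();

        seg_get(s);             /* io in flight holds a reference */
        seg_invalidate(s);      /* segment leaves service immediately ... */
        seg_put(s);             /* ... but is freed only when the io finishes */
        return 0;
}

With this split, invalidation and freeing are decoupled: the segment stops being handed out at once, but its memory is reclaimed only after the last in-flight user drops its reference, which is what lets outstanding io drain safely.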
@@ -1410,9 +1410,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
  */
 void nfs4_evict_inode(struct inode *inode)
 {
+        pnfs_destroy_layout(NFS_I(inode));
         truncate_inode_pages(&inode->i_data, 0);
         end_writeback(inode);
-        pnfs_destroy_layout(NFS_I(inode));
         /* If we are holding a delegation, return it! */
         nfs_inode_return_delegation_noreclaim(inode);
         /* First call standard NFS clear_inode() code */
@@ -211,68 +211,109 @@ static void
 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
 {
         INIT_LIST_HEAD(&lseg->pls_list);
-        kref_init(&lseg->pls_refcount);
+        atomic_set(&lseg->pls_refcount, 1);
+        smp_mb();
+        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
         lseg->pls_layout = lo;
 }
 
-/* Called without i_lock held, as the free_lseg call may sleep */
-static void
-destroy_lseg(struct kref *kref)
+static void free_lseg(struct pnfs_layout_segment *lseg)
 {
-        struct pnfs_layout_segment *lseg =
-                container_of(kref, struct pnfs_layout_segment, pls_refcount);
         struct inode *ino = lseg->pls_layout->plh_inode;
 
-        dprintk("--> %s\n", __func__);
         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
         /* Matched by get_layout_hdr in pnfs_insert_layout */
         put_layout_hdr(ino);
 }
 
-static void
-put_lseg(struct pnfs_layout_segment *lseg)
+/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
+ * could sleep, so must be called outside of the lock.
+ * Returns 1 if object was removed, otherwise return 0.
+ */
+static int
+put_lseg_locked(struct pnfs_layout_segment *lseg,
+                struct list_head *tmp_list)
 {
-        if (!lseg)
-                return;
+        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
+                atomic_read(&lseg->pls_refcount),
+                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+        if (atomic_dec_and_test(&lseg->pls_refcount)) {
+                struct inode *ino = lseg->pls_layout->plh_inode;
 
-        dprintk("%s: lseg %p ref %d\n", __func__, lseg,
-                atomic_read(&lseg->pls_refcount.refcount));
-        kref_put(&lseg->pls_refcount, destroy_lseg);
+                BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+                list_del(&lseg->pls_list);
+                if (list_empty(&lseg->pls_layout->plh_segs)) {
+                        struct nfs_client *clp;
+
+                        clp = NFS_SERVER(ino)->nfs_client;
+                        spin_lock(&clp->cl_lock);
+                        /* List does not take a reference, so no need for put here */
+                        list_del_init(&lseg->pls_layout->plh_layouts);
+                        spin_unlock(&clp->cl_lock);
+                }
+                list_add(&lseg->pls_list, tmp_list);
+                return 1;
+        }
+        return 0;
 }
 
-static void
-pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+static bool
+should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
 {
-        struct pnfs_layout_segment *lseg, *next;
-        struct nfs_client *clp;
+        return (recall_iomode == IOMODE_ANY ||
+                lseg_iomode == recall_iomode);
+}
 
-        dprintk("%s:Begin lo %p\n", __func__, lo);
+/* Returns 1 if lseg is removed from list, 0 otherwise */
+static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
+                             struct list_head *tmp_list)
+{
+        int rv = 0;
 
-        assert_spin_locked(&lo->plh_inode->i_lock);
-        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
-                dprintk("%s: freeing lseg %p\n", __func__, lseg);
-                list_move(&lseg->pls_list, tmp_list);
+        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+                /* Remove the reference keeping the lseg in the
+                 * list.  It will now be removed when all
+                 * outstanding io is finished.
+                 */
+                rv = put_lseg_locked(lseg, tmp_list);
         }
-        clp = NFS_SERVER(lo->plh_inode)->nfs_client;
-        spin_lock(&clp->cl_lock);
-        /* List does not take a reference, so no need for put here */
-        list_del_init(&lo->plh_layouts);
-        spin_unlock(&clp->cl_lock);
+        return rv;
+}
 
-        dprintk("%s:Return\n", __func__);
+/* Returns count of number of matching invalid lsegs remaining in list
+ * after call.
+ */
+static int
+mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+                            struct list_head *tmp_list,
+                            u32 iomode)
+{
+        struct pnfs_layout_segment *lseg, *next;
+        int invalid = 0, removed = 0;
+
+        dprintk("%s:Begin lo %p\n", __func__, lo);
+
+        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+                if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
+                        dprintk("%s: freeing lseg %p iomode %d "
+                                "offset %llu length %llu\n", __func__,
+                                lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
+                                lseg->pls_range.length);
+                        invalid++;
+                        removed += mark_lseg_invalid(lseg, tmp_list);
+                }
+        dprintk("%s:Return %i\n", __func__, invalid - removed);
+        return invalid - removed;
 }
 
 static void
-pnfs_free_lseg_list(struct list_head *tmp_list)
+pnfs_free_lseg_list(struct list_head *free_me)
 {
-        struct pnfs_layout_segment *lseg;
+        struct pnfs_layout_segment *lseg, *tmp;
 
-        while (!list_empty(tmp_list)) {
-                lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
-                                  pls_list);
-                dprintk("%s calling put_lseg on %p\n", __func__, lseg);
+        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                 list_del(&lseg->pls_list);
-                put_lseg(lseg);
+                free_lseg(lseg);
         }
 }
@@ -285,7 +326,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
         spin_lock(&nfsi->vfs_inode.i_lock);
         lo = nfsi->layout;
         if (lo) {
-                pnfs_clear_lseg_list(lo, &tmp_list);
+                set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
+                mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
                 /* Matched by refcount set to 1 in alloc_init_layout_hdr */
                 put_layout_hdr_locked(lo);
         }
@@ -477,9 +519,12 @@ pnfs_find_alloc_layout(struct inode *ino)
         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
 
         assert_spin_locked(&ino->i_lock);
-        if (nfsi->layout)
-                return nfsi->layout;
-
+        if (nfsi->layout) {
+                if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
+                        return NULL;
+                else
+                        return nfsi->layout;
+        }
         spin_unlock(&ino->i_lock);
         new = alloc_init_layout_hdr(ino);
         spin_lock(&ino->i_lock);
@@ -520,7 +565,8 @@ pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
 
         assert_spin_locked(&lo->plh_inode->i_lock);
         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
-                if (is_matching_lseg(lseg, iomode)) {
+                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+                    is_matching_lseg(lseg, iomode)) {
                         ret = lseg;
                         break;
                 }
@@ -529,7 +575,7 @@ pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
         }
 
         dprintk("%s:Return lseg %p ref %d\n",
-                __func__, ret, ret ? atomic_read(&ret->pls_refcount.refcount) : 0);
+                __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
         return ret;
 }
@@ -30,10 +30,15 @@
 #ifndef FS_NFS_PNFS_H
 #define FS_NFS_PNFS_H
 
+enum {
+        NFS_LSEG_VALID = 0,     /* cleared when lseg is recalled/returned */
+};
+
 struct pnfs_layout_segment {
         struct list_head pls_list;
         struct pnfs_layout_range pls_range;
-        struct kref pls_refcount;
+        atomic_t pls_refcount;
+        unsigned long pls_flags;
         struct pnfs_layout_hdr *pls_layout;
 };
 
@@ -44,6 +49,7 @@ struct pnfs_layout_segment {
 enum {
         NFS_LAYOUT_RO_FAILED = 0,       /* get ro layout failed stop trying */
         NFS_LAYOUT_RW_FAILED,           /* get rw layout failed stop trying */
+        NFS_LAYOUT_DESTROYED,           /* no new use of layout allowed */
 };
 
 /* Per-layout driver specific registration structure */