Commit 1442d167 authored by Linus Torvalds

Merge branch 'for-3.2' of git://linux-nfs.org/~bfields/linux

* 'for-3.2' of git://linux-nfs.org/~bfields/linux: (103 commits)
  nfs41: implement DESTROY_CLIENTID operation
  nfsd4: typo logical vs bitwise negate for want_mask
  nfsd4: allow NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL | NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED
  nfsd4: seq->status_flags may be used uninitialized
  nfsd41: use SEQ4_STATUS_BACKCHANNEL_FAULT when cb_sequence is invalid
  nfsd4: implement new 4.1 open reclaim types
  nfsd4: remove unneeded CLAIM_DELEGATE_CUR workaround
  nfsd4: warn on open failure after create
  nfsd4: preallocate open stateid in process_open1()
  nfsd4: do idr preallocation with stateid allocation
  nfsd4: preallocate nfs4_file in process_open1()
  nfsd4: clean up open owners on OPEN failure
  nfsd4: simplify process_open1 logic
  nfsd4: make is_open_owner boolean
  nfsd4: centralize renew_client() calls
  nfsd4: typo logical vs bitwise negate
  nfs: fix bug about IPv6 address scope checking
  nfsd4: more robust ignoring of WANT bits in OPEN
  nfsd4: move name-length checks to xdr
  nfsd4: move access/deny validity checks to xdr code
  ...
parents 7e0bb71e 345c2842
......@@ -51,8 +51,6 @@
#define F_EXLCK 16 /* or 3 */
#define F_SHLCK 32 /* or 4 */
#define F_INPROGRESS 64
#include <asm-generic/fcntl.h>
#endif
......@@ -37,7 +37,6 @@
#include <linux/dirent.h>
#include <linux/fsnotify.h>
#include <linux/highuid.h>
#include <linux/nfsd/syscall.h>
#include <linux/personality.h>
#include <linux/rwsem.h>
#include <linux/tsacct_kern.h>
......
......@@ -316,14 +316,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
struct hlist_node *pos;
struct nlm_host *host = NULL;
struct nsm_handle *nsm = NULL;
struct sockaddr_in sin = {
.sin_family = AF_INET,
};
struct sockaddr_in6 sin6 = {
.sin6_family = AF_INET6,
};
struct sockaddr *src_sap;
size_t src_len = rqstp->rq_addrlen;
struct sockaddr *src_sap = svc_daddr(rqstp);
size_t src_len = rqstp->rq_daddrlen;
struct nlm_lookup_host_info ni = {
.server = 1,
.sap = svc_addr(rqstp),
......@@ -340,21 +334,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
mutex_lock(&nlm_host_mutex);
switch (ni.sap->sa_family) {
case AF_INET:
sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
src_sap = (struct sockaddr *)&sin;
break;
case AF_INET6:
ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
src_sap = (struct sockaddr *)&sin6;
break;
default:
dprintk("lockd: %s failed; unrecognized address family\n",
__func__);
goto out;
}
if (time_after_eq(jiffies, next_gc))
nlm_gc_hosts();
......
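The lockd hunks above drop the per-family copy of the RPC destination address and use the generic svc_daddr()/rq_daddrlen pair instead. A minimal standalone sketch of the same pattern; the request type and fake_daddr() accessor are hypothetical stand-ins, not the kernel's svc_rqst/svc_daddr():

#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>

/* Hypothetical request type carrying a generic destination address;
 * fake_daddr() plays the role of svc_daddr() in the hunk above. */
struct fake_rqst {
	struct sockaddr_storage daddr;	/* holds AF_INET or AF_INET6 */
	socklen_t daddrlen;
};

static struct sockaddr *fake_daddr(struct fake_rqst *rq)
{
	return (struct sockaddr *)&rq->daddr;
}

int main(void)
{
	struct fake_rqst rq = { .daddrlen = sizeof(struct sockaddr_in) };
	struct sockaddr_in *sin = (struct sockaddr_in *)fake_daddr(&rq);

	sin->sin_family = AF_INET;
	/* Consumers key lookups on (sockaddr pointer, length) and never
	 * switch on the address family just to copy the address. */
	printf("family=%d len=%u\n", fake_daddr(&rq)->sa_family,
	       (unsigned)rq.daddrlen);
	return 0;
}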
......@@ -282,7 +282,7 @@ int lockd_up(void)
/*
* Create the kernel thread and wait for it to start.
*/
nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
if (IS_ERR(nlmsvc_rqst)) {
error = PTR_ERR(nlmsvc_rqst);
nlmsvc_rqst = NULL;
......
......@@ -133,6 +133,20 @@
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
static bool lease_breaking(struct file_lock *fl)
{
return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}
static int target_leasetype(struct file_lock *fl)
{
if (fl->fl_flags & FL_UNLOCK_PENDING)
return F_UNLCK;
if (fl->fl_flags & FL_DOWNGRADE_PENDING)
return F_RDLCK;
return fl->fl_type;
}
int leases_enable = 1;
int lease_break_time = 45;
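lease_breaking() and target_leasetype() above encode the new two-bit break state. A minimal user-space model of that state machine; the PEND_* values and toy_* names are illustrative, not the kernel's FL_* flags:

#include <fcntl.h>	/* F_RDLCK, F_WRLCK, F_UNLCK */
#include <stdio.h>

#define PEND_UNLOCK    0x1	/* stand-in for FL_UNLOCK_PENDING */
#define PEND_DOWNGRADE 0x2	/* stand-in for FL_DOWNGRADE_PENDING */

struct toy_lease {
	int type;	/* F_RDLCK or F_WRLCK */
	int flags;
};

static int toy_breaking(const struct toy_lease *l)
{
	return l->flags & (PEND_UNLOCK | PEND_DOWNGRADE);
}

/* What the holder will be left with once the break completes. */
static int toy_target(const struct toy_lease *l)
{
	if (l->flags & PEND_UNLOCK)
		return F_UNLCK;
	if (l->flags & PEND_DOWNGRADE)
		return F_RDLCK;
	return l->type;
}

int main(void)
{
	struct toy_lease l = { .type = F_WRLCK, .flags = PEND_DOWNGRADE };
	printf("breaking=%d target=%d\n", toy_breaking(&l), toy_target(&l));
	return 0;
}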
......@@ -1119,6 +1133,17 @@ int locks_mandatory_area(int read_write, struct inode *inode,
EXPORT_SYMBOL(locks_mandatory_area);
static void lease_clear_pending(struct file_lock *fl, int arg)
{
switch (arg) {
case F_UNLCK:
fl->fl_flags &= ~FL_UNLOCK_PENDING;
/* fall through: */
case F_RDLCK:
fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
}
}
/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
......@@ -1127,6 +1152,7 @@ int lease_modify(struct file_lock **before, int arg)
if (error)
return error;
lease_clear_pending(fl, arg);
locks_wake_up_blocks(fl);
if (arg == F_UNLCK)
locks_delete_lock(before);
......@@ -1135,19 +1161,25 @@ int lease_modify(struct file_lock **before, int arg)
EXPORT_SYMBOL(lease_modify);
static bool past_time(unsigned long then)
{
if (!then)
/* 0 is a special value meaning "this never expires": */
return false;
return time_after(jiffies, then);
}
static void time_out_leases(struct inode *inode)
{
struct file_lock **before;
struct file_lock *fl;
before = &inode->i_flock;
while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
if ((fl->fl_break_time == 0)
|| time_before(jiffies, fl->fl_break_time)) {
before = &fl->fl_next;
continue;
}
lease_modify(before, fl->fl_type & ~F_INPROGRESS);
while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
if (past_time(fl->fl_downgrade_time))
lease_modify(before, F_RDLCK);
if (past_time(fl->fl_break_time))
lease_modify(before, F_UNLCK);
if (fl == *before) /* lease_modify may have freed fl */
before = &fl->fl_next;
}
......@@ -1165,7 +1197,7 @@ static void time_out_leases(struct inode *inode)
*/
int __break_lease(struct inode *inode, unsigned int mode)
{
int error = 0, future;
int error = 0;
struct file_lock *new_fl, *flock;
struct file_lock *fl;
unsigned long break_time;
......@@ -1182,24 +1214,13 @@ int __break_lease(struct inode *inode, unsigned int mode)
if ((flock == NULL) || !IS_LEASE(flock))
goto out;
if (!locks_conflict(flock, new_fl))
goto out;
for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
if (fl->fl_owner == current->files)
i_have_this_lease = 1;
if (want_write) {
/* If we want write access, we have to revoke any lease. */
future = F_UNLCK | F_INPROGRESS;
} else if (flock->fl_type & F_INPROGRESS) {
/* If the lease is already being broken, we just leave it */
future = flock->fl_type;
} else if (flock->fl_type & F_WRLCK) {
/* Downgrade the exclusive lease to a read-only lease. */
future = F_RDLCK | F_INPROGRESS;
} else {
/* the existing lease was read-only, so we can read too. */
goto out;
}
if (IS_ERR(new_fl) && !i_have_this_lease
&& ((mode & O_NONBLOCK) == 0)) {
error = PTR_ERR(new_fl);
......@@ -1214,12 +1235,18 @@ int __break_lease(struct inode *inode, unsigned int mode)
}
for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
if (fl->fl_type != future) {
fl->fl_type = future;
if (want_write) {
if (fl->fl_flags & FL_UNLOCK_PENDING)
continue;
fl->fl_flags |= FL_UNLOCK_PENDING;
fl->fl_break_time = break_time;
/* lease must have lmops break callback */
fl->fl_lmops->lm_break(fl);
} else {
if (lease_breaking(flock))
continue;
fl->fl_flags |= FL_DOWNGRADE_PENDING;
fl->fl_downgrade_time = break_time;
}
fl->fl_lmops->lm_break(fl);
}
if (i_have_this_lease || (mode & O_NONBLOCK)) {
......@@ -1243,10 +1270,13 @@ int __break_lease(struct inode *inode, unsigned int mode)
if (error >= 0) {
if (error == 0)
time_out_leases(inode);
/* Wait for the next lease that has not been broken yet */
/*
* Wait for the next conflicting lease that has not been
* broken yet
*/
for (flock = inode->i_flock; flock && IS_LEASE(flock);
flock = flock->fl_next) {
if (flock->fl_type & F_INPROGRESS)
if (locks_conflict(new_fl, flock))
goto restart;
}
error = 0;
......@@ -1314,7 +1344,7 @@ int fcntl_getlease(struct file *filp)
for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
fl = fl->fl_next) {
if (fl->fl_file == filp) {
type = fl->fl_type & ~F_INPROGRESS;
type = target_leasetype(fl);
break;
}
}
......@@ -1322,41 +1352,15 @@ int fcntl_getlease(struct file *filp)
return type;
}
/**
* generic_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @flp: input - file_lock to use, output - file_lock inserted
*
* The (input) flp->fl_lmops->lm_break function is required
* by break_lease().
*
* Called with file_lock_lock held.
*/
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
int error, rdlease_count = 0, wrlease_count = 0;
int error;
lease = *flp;
error = -EACCES;
if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
goto out;
error = -EINVAL;
if (!S_ISREG(inode->i_mode))
goto out;
error = security_file_lock(filp, arg);
if (error)
goto out;
time_out_leases(inode);
BUG_ON(!(*flp)->fl_lmops->lm_break);
if (arg != F_UNLCK) {
error = -EAGAIN;
if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
goto out;
......@@ -1364,7 +1368,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
&& ((dentry->d_count > 1)
|| (atomic_read(&inode->i_count) > 1)))
goto out;
}
/*
* At this point, we know that if there is an exclusive
......@@ -1374,26 +1377,27 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
* then the file is not open by anyone (including us)
* except for this filp.
*/
error = -EAGAIN;
for (before = &inode->i_flock;
((fl = *before) != NULL) && IS_LEASE(fl);
before = &fl->fl_next) {
if (fl->fl_file == filp)
if (fl->fl_file == filp) {
my_before = before;
else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
continue;
}
/*
* Someone is in the process of opening this
* file for writing so we may not take an
* exclusive lease on it.
* No exclusive leases if someone else has a lease on
* this file:
*/
wrlease_count++;
else
rdlease_count++;
}
error = -EAGAIN;
if ((arg == F_RDLCK && (wrlease_count > 0)) ||
(arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
if (arg == F_WRLCK)
goto out;
/*
* Modifying our existing lease is OK, but no getting a
* new lease if someone else is opening for write:
*/
if (fl->fl_flags & FL_UNLOCK_PENDING)
goto out;
}
if (my_before != NULL) {
error = lease->fl_lmops->lm_change(my_before, arg);
......@@ -1402,9 +1406,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
goto out;
}
if (arg == F_UNLCK)
goto out;
error = -EINVAL;
if (!leases_enable)
goto out;
......@@ -1415,6 +1416,62 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
out:
return error;
}
int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
struct file_lock *fl, **before;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
for (before = &inode->i_flock;
((fl = *before) != NULL) && IS_LEASE(fl);
before = &fl->fl_next) {
if (fl->fl_file != filp)
continue;
return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
}
return -EAGAIN;
}
/**
* generic_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @flp: input - file_lock to use, output - file_lock inserted
*
* The (input) flp->fl_lmops->lm_break function is required
* by break_lease().
*
* Called with file_lock_lock held.
*/
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
int error;
if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
return -EACCES;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
error = security_file_lock(filp, arg);
if (error)
return error;
time_out_leases(inode);
BUG_ON(!(*flp)->fl_lmops->lm_break);
switch (arg) {
case F_UNLCK:
return generic_delete_lease(filp, flp);
case F_RDLCK:
case F_WRLCK:
return generic_add_lease(filp, arg, flp);
default:
BUG();
}
}
EXPORT_SYMBOL(generic_setlease);
static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
......@@ -2126,7 +2183,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
}
} else if (IS_LEASE(fl)) {
seq_printf(f, "LEASE ");
if (fl->fl_type & F_INPROGRESS)
if (lease_breaking(fl))
seq_printf(f, "BREAKING ");
else if (fl->fl_file)
seq_printf(f, "ACTIVE ");
......@@ -2142,7 +2199,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
seq_printf(f, "%s ",
(fl->fl_type & F_INPROGRESS)
(lease_breaking(fl))
? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
: (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
}
......
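A short userspace usage example for the lease API the fs/locks.c hunks above change: with the new fcntl_getlease(), a write lease that is being downgraded reports F_RDLCK (its target type) rather than a raw in-progress state, per target_leasetype() above. Error handling is trimmed; the file path is arbitrary:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/leasetest", O_RDWR | O_CREAT, 0600);
	if (fd < 0)
		return 1;

	/* Take a write lease; requires owning the file (or CAP_LEASE). */
	if (fcntl(fd, F_SETLEASE, F_WRLCK) < 0) {
		perror("F_SETLEASE");
		return 1;
	}

	/* While nothing conflicts, we still hold F_WRLCK. */
	printf("current lease: %d\n", fcntl(fd, F_GETLEASE));

	/* Voluntarily downgrade, then drop the lease. */
	fcntl(fd, F_SETLEASE, F_RDLCK);
	fcntl(fd, F_SETLEASE, F_UNLCK);
	close(fd);
	return 0;
}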
......@@ -125,7 +125,7 @@ nfs4_callback_up(struct svc_serv *serv)
else
goto out_err;
return svc_prepare_thread(serv, &serv->sv_pools[0]);
return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
out_err:
if (ret == 0)
......@@ -199,7 +199,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
INIT_LIST_HEAD(&serv->sv_cb_list);
spin_lock_init(&serv->sv_cb_lock);
init_waitqueue_head(&serv->sv_cb_waitq);
rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
if (IS_ERR(rqstp)) {
svc_xprt_put(serv->sv_bc_xprt);
serv->sv_bc_xprt = NULL;
......
......@@ -336,11 +336,12 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2;
if (ipv6_addr_scope(&sin1->sin6_addr) == IPV6_ADDR_SCOPE_LINKLOCAL &&
sin1->sin6_scope_id != sin2->sin6_scope_id)
if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
return 0;
else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
return sin1->sin6_scope_id == sin2->sin6_scope_id;
return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
return 1;
}
#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
......
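The IPv6 scope fix above compares the addresses first and only then, for link-local addresses, requires matching scope IDs. A standalone sketch of the corrected comparison using the standard userspace macros (IN6_IS_ADDR_LINKLOCAL in place of the kernel's ipv6_addr_type(); function name is illustrative):

#include <string.h>
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Return 1 if the two IPv6 socket addresses name the same endpoint,
 * honouring the scope ID only where it matters (link-local). */
static int ipv6_saddr_match(const struct sockaddr_in6 *a,
			    const struct sockaddr_in6 *b)
{
	if (memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr)) != 0)
		return 0;
	if (IN6_IS_ADDR_LINKLOCAL(&a->sin6_addr))
		return a->sin6_scope_id == b->sin6_scope_id;
	return 1;
}

int main(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6, .sin6_scope_id = 2 };
	struct sockaddr_in6 b;

	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
	b = a;
	b.sin6_scope_id = 3;	/* same link-local address, different link */
	printf("match=%d\n", ipv6_saddr_match(&a, &b));	/* prints 0 */
	return 0;
}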
......@@ -13,30 +13,6 @@
struct idmap;
/*
* In a seqid-mutating op, this macro controls which error return
* values trigger incrementation of the seqid.
*
* from rfc 3010:
* The client MUST monotonically increment the sequence number for the
* CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE
* operations. This is true even in the event that the previous
* operation that used the sequence number received an error. The only
* exception to this rule is if the previous operation received one of
* the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID,
* NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR,
* NFSERR_RESOURCE, NFSERR_NOFILEHANDLE.
*
*/
#define seqid_mutating_err(err) \
(((err) != NFSERR_STALE_CLIENTID) && \
((err) != NFSERR_STALE_STATEID) && \
((err) != NFSERR_BAD_STATEID) && \
((err) != NFSERR_BAD_SEQID) && \
((err) != NFSERR_BAD_XDR) && \
((err) != NFSERR_RESOURCE) && \
((err) != NFSERR_NOFILEHANDLE))
enum nfs4_client_state {
NFS4CLNT_MANAGER_RUNNING = 0,
NFS4CLNT_CHECK_LEASE,
......
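The seqid-increment rule removed from this header can be read more easily as a switch. A sketch equivalent to the deleted macro, following the RFC 3010 text quoted above; it assumes the NFSERR_* constants are in scope and is not part of this patch:

/* Seqid-mutating ops bump their seqid even on error, except for the
 * errors listed in the RFC text above (names as in the removed macro). */
static inline int seqid_mutating_error(int err)
{
	switch (err) {
	case NFSERR_STALE_CLIENTID:
	case NFSERR_STALE_STATEID:
	case NFSERR_BAD_STATEID:
	case NFSERR_BAD_SEQID:
	case NFSERR_BAD_XDR:
	case NFSERR_RESOURCE:
	case NFSERR_NOFILEHANDLE:
		return 0;
	default:
		return 1;
	}
}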
......@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/exportfs.h>
#include <linux/nfsd/syscall.h>
#include <net/ipv6.h>
#include "nfsd.h"
......@@ -318,7 +317,6 @@ static void svc_export_put(struct kref *ref)
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
kfree(exp->ex_pathname);
nfsd4_fslocs_free(&exp->ex_fslocs);
kfree(exp);
}
......@@ -528,11 +526,6 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_client = dom;
err = -ENOMEM;
exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
if (!exp.ex_pathname)
goto out2;
/* expiry */
err = -EINVAL;
exp.h.expiry_time = get_expiry(&mesg);
......@@ -613,8 +606,6 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
nfsd4_fslocs_free(&exp.ex_fslocs);
kfree(exp.ex_uuid);
out3:
kfree(exp.ex_pathname);
out2:
path_put(&exp.ex_path);
out1:
auth_domain_put(dom);
......@@ -678,7 +669,6 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_client = item->ex_client;
new->ex_path.dentry = dget(item->ex_path.dentry);
new->ex_path.mnt = mntget(item->ex_path.mnt);
new->ex_pathname = NULL;
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
......@@ -696,8 +686,6 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
new->ex_fsid = item->ex_fsid;
new->ex_uuid = item->ex_uuid;
item->ex_uuid = NULL;
new->ex_pathname = item->ex_pathname;
item->ex_pathname = NULL;
new->ex_fslocs.locations = item->ex_fslocs.locations;
item->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = item->ex_fslocs.locations_count;
......@@ -1010,7 +998,7 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
return exp;
}
static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp)
{
u32 fsidv[2];
......@@ -1030,7 +1018,7 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
struct svc_export *exp;
__be32 rv;
exp = find_fsidzero_export(rqstp);
exp = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
......
......@@ -39,6 +39,8 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
......@@ -351,7 +353,7 @@ static void encode_cb_recall4args(struct xdr_stream *xdr,
__be32 *p;
encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
encode_stateid4(xdr, &dp->dl_stateid);
encode_stateid4(xdr, &dp->dl_stid.sc_stateid);
p = xdr_reserve_space(xdr, 4);
*p++ = xdr_zero; /* truncate */
......@@ -460,6 +462,8 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
*/
status = 0;
out:
if (status)
nfsd4_mark_cb_fault(cb->cb_clp, status);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
......@@ -686,6 +690,12 @@ static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
warn_no_callback_path(clp, reason);
}
static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
{
clp->cl_cb_state = NFSD4_CB_FAULT;
warn_no_callback_path(clp, reason);
}
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
......@@ -787,7 +797,7 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
struct nfs4_client *clp = dp->dl_stid.sc_client;
u32 minorversion = clp->cl_minorversion;
cb->cb_minorversion = minorversion;
......@@ -809,7 +819,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
struct nfs4_client *clp = dp->dl_stid.sc_client;
dprintk("%s: minorversion=%d\n", __func__,
clp->cl_minorversion);
......@@ -832,7 +842,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
struct nfs4_client *clp = dp->dl_stid.sc_client;
struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
nfsd4_cb_done(task, calldata);
......@@ -1006,7 +1016,7 @@ void nfsd4_do_callback_rpc(struct work_struct *w)
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfsd4_callback *cb = &dp->dl_recall;
struct nfs4_client *clp = dp->dl_client;
struct nfs4_client *clp = dp->dl_stid.sc_client;
dp->dl_retries = 1;
cb->cb_op = dp;
......
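nfsd4_mark_cb_fault() above introduces a third backchannel state alongside up/down. A toy model of how a server might map such a state onto the SEQ4 status bits a 4.1 client sees in SEQUENCE replies; the bit values follow RFC 5661, while the enum and helper names are hypothetical, not nfsd's:

/* Illustrative only: a toy backchannel-state model. */
#define SEQ4_STATUS_CB_PATH_DOWN	0x00000001
#define SEQ4_STATUS_BACKCHANNEL_FAULT	0x00000400

enum toy_cb_state { TOY_CB_UP, TOY_CB_DOWN, TOY_CB_FAULT };

static unsigned int toy_seq_status_flags(enum toy_cb_state s)
{
	switch (s) {
	case TOY_CB_DOWN:
		return SEQ4_STATUS_CB_PATH_DOWN;
	case TOY_CB_FAULT:
		/* e.g. a garbled CB_SEQUENCE reply, as in the hunk above */
		return SEQ4_STATUS_BACKCHANNEL_FAULT;
	default:
		return 0;
	}
}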
......@@ -35,6 +35,7 @@
#include <linux/file.h>
#include <linux/slab.h>
#include "idmap.h"
#include "cache.h"
#include "xdr4.h"
#include "vfs.h"
......@@ -156,6 +157,8 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
accmode |= NFSD_MAY_READ_IF_EXEC;
if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
accmode |= NFSD_MAY_READ;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
......@@ -168,12 +171,29 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
return status;
}
static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
{
umode_t mode = fh->fh_dentry->d_inode->i_mode;
if (S_ISREG(mode))
return nfs_ok;
if (S_ISDIR(mode))
return nfserr_isdir;
/*
* Using err_symlink as our catch-all case may look odd; but
* there's no other obvious error for this case in 4.0, and we
* happen to know that it will cause the linux v4 client to do
* the right thing on attempts to open something other than a
* regular file.
*/
return nfserr_symlink;
}
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct svc_fh resfh;
__be32 status;
int created = 0;
fh_init(&resfh, NFS4_FHSIZE);
open->op_truncate = 0;
......@@ -202,7 +222,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
open->op_fname.len, &open->op_iattr,
&resfh, open->op_createmode,
(u32 *)open->op_verf.data,
&open->op_truncate, &created);
&open->op_truncate, &open->op_created);
/*
* Following rfc 3530 14.2.16, use the returned bitmask
......@@ -216,6 +236,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
status = nfsd_lookup(rqstp, current_fh,
open->op_fname.data, open->op_fname.len, &resfh);
fh_unlock(current_fh);
if (status)
goto out;
status = nfsd_check_obj_isreg(&resfh);
}
if (status)
goto out;
......@@ -227,9 +250,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
fh_dup2(current_fh, &resfh);
/* set reply cache */
fh_copy_shallow(&open->op_stateowner->so_replay.rp_openfh,
fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&resfh.fh_handle);
if (!created)
if (!open->op_created)
status = do_open_permission(rqstp, current_fh, open,
NFSD_MAY_NOP);
......@@ -254,7 +277,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
/* set replay cache */
fh_copy_shallow(&open->op_stateowner->so_replay.rp_openfh,
fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&current_fh->fh_handle);
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
......@@ -283,14 +306,18 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_compoundres *resp;
dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n",
dprintk("NFSD: nfsd4_open filename %.*s op_openowner %p\n",
(int)open->op_fname.len, open->op_fname.data,
open->op_stateowner);
open->op_openowner);
/* This check required by spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
/* We don't yet support WANT bits: */
open->op_share_access &= NFS4_SHARE_ACCESS_MASK;
open->op_created = 0;
/*
* RFC5661 18.51.3
* Before RECLAIM_COMPLETE done, server should deny new lock
......@@ -309,7 +336,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
resp = rqstp->rq_resp;
status = nfsd4_process_open1(&resp->cstate, open);
if (status == nfserr_replay_me) {
struct nfs4_replay *rp = &open->op_stateowner->so_replay;
struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
fh_put(&cstate->current_fh);
fh_copy_shallow(&cstate->current_fh.fh_handle,
&rp->rp_openfh);
......@@ -339,32 +366,23 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
case NFS4_OPEN_CLAIM_NULL:
/*
* (1) set CURRENT_FH to the file being opened,
* creating it if necessary, (2) set open->op_cinfo,
* (3) set open->op_truncate if the file is to be
* truncated after opening, (4) do permission checking.
*/
status = do_open_lookup(rqstp, &cstate->current_fh,
open);
if (status)
goto out;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
open->op_stateowner->so_confirmed = 1;
/*
* The CURRENT_FH is already set to the file being
* opened. (1) set open->op_cinfo, (2) set
* open->op_truncate if the file is to be truncated
* after opening, (3) do permission checking.
*/
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
status = do_open_fhandle(rqstp, &cstate->current_fh,
open);
if (status)
goto out;
break;
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
open->op_stateowner->so_confirmed = 1;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
dprintk("NFSD: unsupported OPEN claim type %d\n",
open->op_claim_type);
status = nfserr_notsupp;
......@@ -381,11 +399,12 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* set, (2) sets open->op_stateid, (3) sets open->op_delegation.
*/
status = nfsd4_process_open2(rqstp, &cstate->current_fh, open);
WARN_ON(status && open->op_created);
out:
if (open->op_stateowner) {
nfs4_get_stateowner(open->op_stateowner);
cstate->replay_owner = open->op_stateowner;
}
nfsd4_cleanup_open_state(open, status);
if (open->op_openowner)
cstate->replay_owner = &open->op_openowner->oo_owner;
else
nfs4_unlock_state();
return status;
}
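The claim-type switch in nfsd4_open() above sorts OPEN requests into three paths. A compact sketch of that dispatch; the enum values follow the RFC 3530/5661 open_claim_type4 assignments, and the helper is illustrative rather than nfsd code:

enum open_claim {
	CLAIM_NULL		= 0,
	CLAIM_PREVIOUS		= 1,
	CLAIM_DELEGATE_CUR	= 2,
	CLAIM_DELEGATE_PREV	= 3,
	CLAIM_FH		= 4,	/* 4.1 */
	CLAIM_DELEG_CUR_FH	= 5,	/* 4.1 */
	CLAIM_DELEG_PREV_FH	= 6,	/* 4.1 */
};

enum open_path { OPEN_BY_LOOKUP, OPEN_BY_CURRENT_FH, OPEN_UNSUPPORTED };

static enum open_path classify_open_claim(enum open_claim c)
{
	switch (c) {
	case CLAIM_NULL:
	case CLAIM_DELEGATE_CUR:
		return OPEN_BY_LOOKUP;		/* do_open_lookup() path */
	case CLAIM_PREVIOUS:
	case CLAIM_FH:
	case CLAIM_DELEG_CUR_FH:
		return OPEN_BY_CURRENT_FH;	/* do_open_fhandle() path */
	default:
		return OPEN_UNSUPPORTED;	/* nfserr_notsupp */
	}
}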
......@@ -467,17 +486,12 @@ static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_commit *commit)
{
__be32 status;
u32 *p = (u32 *)commit->co_verf.data;
*p++ = nfssvc_boot.tv_sec;
*p++ = nfssvc_boot.tv_usec;
status = nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
commit->co_count);
if (status == nfserr_symlink)
status = nfserr_inval;
return status;
}
static __be32
......@@ -492,8 +506,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR,
NFSD_MAY_CREATE);
if (status == nfserr_symlink)
status = nfserr_notdir;
if (status)
return status;
......@@ -691,7 +703,7 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
readdir->rd_bmval[1] &= nfsd_suppattrs1(cstate->minorversion);
readdir->rd_bmval[2] &= nfsd_suppattrs2(cstate->minorversion);
if ((cookie > ~(u32)0) || (cookie == 1) || (cookie == 2) ||
if ((cookie == 1) || (cookie == 2) ||
(cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
return nfserr_bad_cookie;
......@@ -719,8 +731,6 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
if (status == nfserr_symlink)
return nfserr_notdir;
if (!status) {
fh_unlock(&cstate->current_fh);
set_change_info(&remove->rm_cinfo, &cstate->current_fh);
......@@ -751,8 +761,6 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
(S_ISDIR(cstate->save_fh.fh_dentry->d_inode->i_mode) &&
S_ISDIR(cstate->current_fh.fh_dentry->d_inode->i_mode)))
status = nfserr_exist;
else if (status == nfserr_symlink)
status = nfserr_notdir;
if (!status) {
set_change_info(&rename->rn_sinfo, &cstate->current_fh);
......@@ -892,8 +900,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
write->wr_bytes_written = cnt;
if (status == nfserr_symlink)
status = nfserr_inval;
return status;
}
......@@ -930,7 +936,7 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
count = 4 + (verify->ve_attrlen >> 2);
buf = kmalloc(count << 2, GFP_KERNEL);
if (!buf)
return nfserr_resource;
return nfserr_jukebox;
status = nfsd4_encode_fattr(&cstate->current_fh,
cstate->current_fh.fh_export,
......@@ -994,6 +1000,8 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
void *);
typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op);
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
......@@ -1001,13 +1009,15 @@ enum nfsd4_op_flags {
/* For rfc 5661 section 2.6.3.1.1: */
OP_HANDLES_WRONGSEC = 1 << 3,
OP_IS_PUTFH_LIKE = 1 << 4,
};
struct nfsd4_operation {
nfsd4op_func op_func;
u32 op_flags;
char *op_name;
/*
* These are the ops whose result size we estimate before
* encoding, to avoid performing an op then not being able to
* respond or cache a response. This includes writes and setattrs
* as well as the operations usually called "nonidempotent":
*/
OP_MODIFIES_SOMETHING = 1 << 5,
/*
* Cache compounds containing these ops in the xid-based drc:
* We use the DRC for compounds containing non-idempotent
* operations, *except* those that are 4.1-specific (since
* sessions provide their own EOS), and except for stateful
......@@ -1015,7 +1025,15 @@ struct nfsd4_operation {
* (since sequence numbers provide EOS for open, lock, etc in
* the v4.0 case).
*/
bool op_cacheresult;
OP_CACHEME = 1 << 6,
};
struct nfsd4_operation {
nfsd4op_func op_func;
u32 op_flags;
char *op_name;
/* Try to get response size before operation */
nfsd4op_rsize op_rsize_bop;
};
static struct nfsd4_operation nfsd4_ops[];
......@@ -1062,7 +1080,7 @@ static inline struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
bool nfsd4_cache_this_op(struct nfsd4_op *op)
{
return OPDESC(op)->op_cacheresult;
return OPDESC(op)->op_flags & OP_CACHEME;
}
static bool need_wrongsec_check(struct svc_rqst *rqstp)
......@@ -1110,6 +1128,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
struct nfsd4_operation *opdesc;
struct nfsd4_compound_state *cstate = &resp->cstate;
int slack_bytes;
u32 plen = 0;
__be32 status;
resp->xbuf = &rqstp->rq_res;
......@@ -1188,6 +1207,15 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
goto encode_op;
}
/* If op is non-idempotent */
if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
plen = opdesc->op_rsize_bop(rqstp, op);
op->status = nfsd4_check_resp_size(resp, plen);
}
if (op->status)
goto encode_op;
if (opdesc->op_func)
op->status = opdesc->op_func(rqstp, cstate, &op->u);
else
......@@ -1217,7 +1245,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
be32_to_cpu(status));
if (cstate->replay_owner) {
nfs4_put_stateowner(cstate->replay_owner);
nfs4_unlock_state();
cstate->replay_owner = NULL;
}
/* XXX Ugh, we need to get rid of this kind of special case: */
......@@ -1238,6 +1266,144 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
return status;
}
#define op_encode_hdr_size (2)
#define op_encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
#define op_encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define op_encode_change_info_maxsz (5)
#define nfs4_fattr_bitmap_maxsz (4)
#define op_encode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define op_encode_lock_denied_maxsz (8 + op_encode_lockowner_maxsz)
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define op_encode_ace_maxsz (3 + nfs4_owner_maxsz)
#define op_encode_delegation_maxsz (1 + op_encode_stateid_maxsz + 1 + \
op_encode_ace_maxsz)
#define op_encode_channel_attrs_maxsz (6 + 1 + 1)
static inline u32 nfsd4_only_status_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size) * sizeof(__be32);
}
static inline u32 nfsd4_status_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz)* sizeof(__be32);
}
static inline u32 nfsd4_commit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_create_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_link_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_lock_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_lock_denied_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_open_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz
+ op_encode_change_info_maxsz + 1
+ nfs4_fattr_bitmap_maxsz
+ op_encode_delegation_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_read_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 maxcount = 0, rlen = 0;
maxcount = svc_max_payload(rqstp);
rlen = op->u.read.rd_length;
if (rlen > maxcount)
rlen = maxcount;
return (op_encode_hdr_size + 2) * sizeof(__be32) + rlen;
}
static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 rlen = op->u.readdir.rd_maxcount;
if (rlen > PAGE_SIZE)
rlen = PAGE_SIZE;
return (op_encode_hdr_size + op_encode_verifier_maxsz)
* sizeof(__be32) + rlen;
}
static inline u32 nfsd4_remove_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ op_encode_change_info_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1024) * sizeof(__be32);
}
static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\
2 + /*eir_server_owner.so_minor_id */\
/* eir_server_owner.so_major_id<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
/* eir_server_scope<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
1 + /* eir_server_impl_id array length */\
0 /* ignored eir_server_impl_id contents */) * sizeof(__be32);
}
static inline u32 nfsd4_bind_conn_to_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* bctsr_sessid */\
2 /* bctsr_dir, use_conn_in_rdma_mode */) * sizeof(__be32);
}
static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* sessionid */\
2 + /* csr_sequence, csr_flags */\
op_encode_channel_attrs_maxsz + \
op_encode_channel_attrs_maxsz) * sizeof(__be32);
}
static struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
.op_func = (nfsd4op_func)nfsd4_access,
......@@ -1245,20 +1411,27 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_CLOSE] = {
.op_func = (nfsd4op_func)nfsd4_close,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLOSE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_COMMIT] = {
.op_func = (nfsd4op_func)nfsd4_commit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COMMIT",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_commit_rsize,
},
[OP_CREATE] = {
.op_func = (nfsd4op_func)nfsd4_create,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_CREATE",
.op_cacheresult = true,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
},
[OP_DELEGRETURN] = {
.op_func = (nfsd4op_func)nfsd4_delegreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DELEGRETURN",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_GETATTR] = {
.op_func = (nfsd4op_func)nfsd4_getattr,
......@@ -1271,12 +1444,16 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LINK] = {
.op_func = (nfsd4op_func)nfsd4_link,
.op_flags = ALLOWED_ON_ABSENT_FS | OP_MODIFIES_SOMETHING
| OP_CACHEME,
.op_name = "OP_LINK",
.op_cacheresult = true,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_link_rsize,
},
[OP_LOCK] = {
.op_func = (nfsd4op_func)nfsd4_lock,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
},
[OP_LOCKT] = {
.op_func = (nfsd4op_func)nfsd4_lockt,
......@@ -1284,7 +1461,9 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCKU] = {
.op_func = (nfsd4op_func)nfsd4_locku,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCKU",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_LOOKUP] = {
.op_func = (nfsd4op_func)nfsd4_lookup,
......@@ -1302,42 +1481,54 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_OPEN] = {
.op_func = (nfsd4op_func)nfsd4_open,
.op_flags = OP_HANDLES_WRONGSEC,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
},
[OP_OPEN_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_open_confirm,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_CONFIRM",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_OPEN_DOWNGRADE] = {
.op_func = (nfsd4op_func)nfsd4_open_downgrade,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_DOWNGRADE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_PUTFH] = {
.op_func = (nfsd4op_func)nfsd4_putfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE,
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_PUTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTPUBFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE,
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_PUTPUBFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTROOTFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE,
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_PUTROOTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_READ] = {
.op_func = (nfsd4op_func)nfsd4_read,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_READ",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
},
[OP_READDIR] = {
.op_func = (nfsd4op_func)nfsd4_readdir,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_READDIR",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_readdir_rsize,
},
[OP_READLINK] = {
.op_func = (nfsd4op_func)nfsd4_readlink,
......@@ -1345,29 +1536,36 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_REMOVE] = {
.op_func = (nfsd4op_func)nfsd4_remove,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_REMOVE",
.op_cacheresult = true,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_remove_rsize,
},
[OP_RENAME] = {
.op_name = "OP_RENAME",
.op_func = (nfsd4op_func)nfsd4_rename,
.op_cacheresult = true,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_RENAME",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_rename_rsize,
},
[OP_RENEW] = {
.op_func = (nfsd4op_func)nfsd4_renew,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RENEW",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_RESTOREFH] = {
.op_func = (nfsd4op_func)nfsd4_restorefh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE,
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_RESTOREFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SAVEFH] = {
.op_func = (nfsd4op_func)nfsd4_savefh,
.op_flags = OP_HANDLES_WRONGSEC,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_SAVEFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO] = {
.op_func = (nfsd4op_func)nfsd4_secinfo,
......@@ -1377,19 +1575,22 @@ static struct nfsd4_operation nfsd4_ops[] = {
[OP_SETATTR] = {
.op_func = (nfsd4op_func)nfsd4_setattr,
.op_name = "OP_SETATTR",
.op_cacheresult = true,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
},
[OP_SETCLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_setclientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID",
.op_cacheresult = true,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_setclientid_rsize,
},
[OP_SETCLIENTID_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID_CONFIRM",
.op_cacheresult = true,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_VERIFY] = {
.op_func = (nfsd4op_func)nfsd4_verify,
......@@ -1397,35 +1598,46 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_WRITE] = {
.op_func = (nfsd4op_func)nfsd4_write,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_WRITE",
.op_cacheresult = true,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
},
[OP_RELEASE_LOCKOWNER] = {
.op_func = (nfsd4op_func)nfsd4_release_lockowner,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RELEASE_LOCKOWNER",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
/* NFSv4.1 operations */
[OP_EXCHANGE_ID] = {
.op_func = (nfsd4op_func)nfsd4_exchange_id,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_EXCHANGE_ID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_exchange_id_rsize,
},
[OP_BIND_CONN_TO_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_BIND_CONN_TO_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_bind_conn_to_session_rsize,
},
[OP_CREATE_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_create_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_CREATE_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_create_session_rsize,
},
[OP_DESTROY_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_destroy_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SEQUENCE] = {
.op_func = (nfsd4op_func)nfsd4_sequence,
......@@ -1433,14 +1645,17 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_name = "OP_SEQUENCE",
},
[OP_DESTROY_CLIENTID] = {
.op_func = NULL,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_func = (nfsd4op_func)nfsd4_destroy_clientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_CLIENTID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_RECLAIM_COMPLETE] = {
.op_func = (nfsd4op_func)nfsd4_reclaim_complete,
.op_flags = ALLOWED_WITHOUT_FH,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_RECLAIM_COMPLETE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO_NO_NAME] = {
.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
......@@ -1454,8 +1669,9 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_FREE_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_free_stateid,
.op_flags = ALLOWED_WITHOUT_FH,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_FREE_STATEID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
};
......
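The per-op table above now carries OP_MODIFIES_SOMETHING, OP_CACHEME and a worst-case-reply-size callback instead of the old op_cacheresult boolean, and nfsd4_proc_compound() checks the estimate before running a modifying op. A compact user-space model of that discipline; the flag bits, sizes and names are illustrative, not nfsd's:

#include <stdio.h>

#define OPX_MODIFIES	(1 << 0)	/* illustrative flag bits */
#define OPX_CACHEME	(1 << 1)

struct toy_op {
	const char *name;
	unsigned int flags;
	/* Worst-case encoded reply size, in bytes, for this op. */
	unsigned int (*rsize)(void);
};

static unsigned int status_only_rsize(void)  { return 2 * 4; }
static unsigned int write_rsize(void)        { return (2 + 2) * 4; }

static const struct toy_op toy_ops[] = {
	{ "WRITE",  OPX_MODIFIES | OPX_CACHEME, write_rsize },
	{ "RENEW",  OPX_MODIFIES,               status_only_rsize },
};

/* Refuse a modifying op whose worst-case reply would not fit in the
 * space left in the reply buffer. */
static int may_run(const struct toy_op *op, unsigned int space_left)
{
	if (!(op->flags & OPX_MODIFIES))
		return 1;
	return op->rsize() <= space_left;
}

int main(void)
{
	printf("WRITE fits in 8 bytes? %d\n", may_run(&toy_ops[0], 8));
	printf("RENEW fits in 8 bytes? %d\n", may_run(&toy_ops[1], 8));
	return 0;
}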
......@@ -45,6 +45,7 @@
/* Globals */
static struct file *rec_file;
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
static int
nfs4_save_creds(const struct cred **original_creds)
......@@ -88,7 +89,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
struct xdr_netobj cksum;
struct hash_desc desc;
struct scatterlist sg;
__be32 status = nfserr_resource;
__be32 status = nfserr_jukebox;
dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
clname->len, clname->data);
......@@ -129,6 +130,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
if (!rec_file || clp->cl_firststate)
return 0;
clp->cl_firststate = 1;
status = nfs4_save_creds(&original_cred);
if (status < 0)
return status;
......@@ -143,10 +145,8 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
goto out_unlock;
}
status = -EEXIST;
if (dentry->d_inode) {
dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
if (dentry->d_inode)
goto out_put;
}
status = mnt_want_write(rec_file->f_path.mnt);
if (status)
goto out_put;
......@@ -156,12 +156,14 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
dput(dentry);
out_unlock:
mutex_unlock(&dir->d_inode->i_mutex);
if (status == 0) {
clp->cl_firststate = 1;
if (status == 0)
vfs_fsync(rec_file, 0);
}
else
printk(KERN_ERR "NFSD: failed to write recovery record"
" (err %d); please check that %s exists"
" and is writeable", status,
user_recovery_dirname);
nfs4_reset_creds(original_cred);
dprintk("NFSD: nfsd4_create_clid_dir returns %d\n", status);
return status;
}
......@@ -354,13 +356,13 @@ nfsd4_recdir_load(void) {
*/
void
nfsd4_init_recdir(char *rec_dirname)
nfsd4_init_recdir()
{
const struct cred *original_cred;
int status;
printk("NFSD: Using %s as the NFSv4 state recovery directory\n",
rec_dirname);
user_recovery_dirname);
BUG_ON(rec_file);
......@@ -372,10 +374,10 @@ nfsd4_init_recdir(char *rec_dirname)
return;
}
rec_file = filp_open(rec_dirname, O_RDONLY | O_DIRECTORY, 0);
rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0);
if (IS_ERR(rec_file)) {
printk("NFSD: unable to find recovery directory %s\n",
rec_dirname);
user_recovery_dirname);
rec_file = NULL;
}
......@@ -390,3 +392,30 @@ nfsd4_shutdown_recdir(void)
fput(rec_file);
rec_file = NULL;
}
/*
* Change the NFSv4 recovery directory to recdir.
*/
int
nfs4_reset_recoverydir(char *recdir)
{
int status;
struct path path;
status = kern_path(recdir, LOOKUP_FOLLOW, &path);
if (status)
return status;
status = -ENOTDIR;
if (S_ISDIR(path.dentry->d_inode->i_mode)) {
strcpy(user_recovery_dirname, recdir);
status = 0;
}
path_put(&path);
return status;
}
char *
nfs4_recoverydir(void)
{
return user_recovery_dirname;
}
......@@ -49,9 +49,6 @@
time_t nfsd4_lease = 90; /* default lease time */
time_t nfsd4_grace = 90;
static time_t boot_time;
static u32 current_ownerid = 1;
static u32 current_fileid = 1;
static u32 current_delegid = 1;
static stateid_t zerostateid; /* bits all 0 */
static stateid_t onestateid; /* bits all 1 */
static u64 current_sessionid = 1;
......@@ -60,13 +57,7 @@ static u64 current_sessionid = 1;
#define ONE_STATEID(stateid) (!memcmp((stateid), &onestateid, sizeof(stateid_t)))
/* forward declarations */
static struct nfs4_stateid * find_stateid(stateid_t *stid, int flags);
static struct nfs4_stateid * search_for_stateid(stateid_t *stid);
static struct nfs4_delegation * search_for_delegation(stateid_t *stid);
static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, stateid_t *stid);
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
static void nfs4_set_recdir(char *recdir);
static int check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner);
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
/* Locking: */
......@@ -80,7 +71,8 @@ static DEFINE_MUTEX(client_mutex);
*/
static DEFINE_SPINLOCK(recall_lock);
static struct kmem_cache *stateowner_slab = NULL;
static struct kmem_cache *openowner_slab = NULL;
static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;
......@@ -112,6 +104,11 @@ opaque_hashval(const void *ptr, int nbytes)
static struct list_head del_recall_lru;
static void nfsd4_free_file(struct nfs4_file *f)
{
kmem_cache_free(file_slab, f);
}
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
......@@ -119,7 +116,7 @@ put_nfs4_file(struct nfs4_file *fi)
list_del(&fi->fi_hash);
spin_unlock(&recall_lock);
iput(fi->fi_inode);
kmem_cache_free(file_slab, fi);
nfsd4_free_file(fi);
}
}
......@@ -136,35 +133,33 @@ unsigned int max_delegations;
* Open owner state (share locks)
*/
/* hash tables for nfs4_stateowner */
#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
/* hash tables for open owners */
#define OPEN_OWNER_HASH_BITS 8
#define OPEN_OWNER_HASH_SIZE (1 << OPEN_OWNER_HASH_BITS)
#define OPEN_OWNER_HASH_MASK (OPEN_OWNER_HASH_SIZE - 1)
static unsigned int open_ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
{
unsigned int ret;
#define ownerid_hashval(id) \
((id) & OWNER_HASH_MASK)
#define ownerstr_hashval(clientid, ownername) \
(((clientid) + opaque_hashval((ownername.data), (ownername.len))) & OWNER_HASH_MASK)
ret = opaque_hashval(ownername->data, ownername->len);
ret += clientid;
return ret & OPEN_OWNER_HASH_MASK;
}
static struct list_head ownerid_hashtbl[OWNER_HASH_SIZE];
static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];
static struct list_head open_ownerstr_hashtbl[OPEN_OWNER_HASH_SIZE];
/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
/* hash table for (open)nfs4_stateid */
#define STATEID_HASH_BITS 10
#define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS)
#define STATEID_HASH_MASK (STATEID_HASH_SIZE - 1)
#define file_hashval(x) \
hash_ptr(x, FILE_HASH_BITS)
#define stateid_hashval(owner_id, file_id) \
(((owner_id) + (file_id)) & STATEID_HASH_MASK)
static unsigned int file_hashval(struct inode *ino)
{
/* XXX: why are we hashing on inode pointer, anyway? */
return hash_ptr(ino, FILE_HASH_BITS);
}
static struct list_head file_hashtbl[FILE_HASH_SIZE];
static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];
static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
......@@ -192,8 +187,15 @@ static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
if (atomic_dec_and_test(&fp->fi_access[oflag])) {
nfs4_file_put_fd(fp, O_RDWR);
nfs4_file_put_fd(fp, oflag);
/*
* It's also safe to get rid of the RDWR open *if*
* we no longer have need of the other kind of access
* or if we already have the other kind of open:
*/
if (fp->fi_fds[1-oflag]
|| atomic_read(&fp->fi_access[1 - oflag]) == 0)
nfs4_file_put_fd(fp, O_RDWR);
}
}
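__nfs4_file_put_access() above keeps the shared O_RDWR struct file open as long as the other access mode might still need it. A toy model of that refcounting rule, where array indices 0/1 stand in for read/write access; nothing here is nfsd API:

#include <stdio.h>

struct toy_file {
	int access[2];	/* outstanding opens needing each access mode */
	int fd[2];	/* 1 if a single-mode fd is open for that mode */
	int fd_rdwr;	/* 1 if a shared read/write fd is open */
};

static void toy_put_access(struct toy_file *f, int mode)
{
	if (--f->access[mode] > 0)
		return;
	/* Last user of this mode: its dedicated fd can go. */
	f->fd[mode] = 0;
	/* The shared RDWR fd may go only if the other mode either has its
	 * own fd or has no users left at all. */
	if (f->fd[1 - mode] || f->access[1 - mode] == 0)
		f->fd_rdwr = 0;
}

int main(void)
{
	struct toy_file f = { .access = { 1, 1 }, .fd = { 0, 0 }, .fd_rdwr = 1 };

	toy_put_access(&f, 0);	/* reader gone; writer still needs RDWR */
	printf("rdwr still open: %d\n", f.fd_rdwr);	/* 1 */
	toy_put_access(&f, 1);	/* last writer gone */
	printf("rdwr still open: %d\n", f.fd_rdwr);	/* 0 */
	return 0;
}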
......@@ -206,8 +208,73 @@ static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
__nfs4_file_put_access(fp, oflag);
}
static inline int get_new_stid(struct nfs4_stid *stid)
{
static int min_stateid = 0;
struct idr *stateids = &stid->sc_client->cl_stateids;
int new_stid;
int error;
error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
/*
* Note: the necessary preallocation was done in
* nfs4_alloc_stateid(). The idr code caps the number of
* preallocations that can exist at a time, but the state lock
* prevents anyone from using ours before we get here:
*/
BUG_ON(error);
/*
* It shouldn't be a problem to reuse an opaque stateid value.
* I don't think it is for 4.1. But with 4.0 I worry that, for
* example, a stray write retransmission could be accepted by
* the server when it should have been rejected. Therefore,
* adopt a trick from the sctp code to attempt to maximize the
* amount of time until an id is reused, by ensuring they always
* "increase" (mod INT_MAX):
*/
min_stateid = new_stid+1;
if (min_stateid == INT_MAX)
min_stateid = 0;
return new_stid;
}
static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
{
stateid_t *s = &stid->sc_stateid;
int new_id;
stid->sc_type = type;
stid->sc_client = cl;
s->si_opaque.so_clid = cl->cl_clientid;
new_id = get_new_stid(stid);
s->si_opaque.so_id = (u32)new_id;
/* Will be incremented before return to client: */
s->si_generation = 0;
}
static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
{
struct idr *stateids = &cl->cl_stateids;
if (!idr_pre_get(stateids, GFP_KERNEL))
return NULL;
/*
* Note: if we fail here (or any time between now and the time
* we actually get the new idr), we won't need to undo the idr
* preallocation, since the idr code caps the number of
* preallocated entries.
*/
return kmem_cache_alloc(slab, GFP_KERNEL);
}
static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
{
return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
}
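get_new_stid() above preallocates with idr_pre_get() and then asks idr_get_new_above() for an id no smaller than the last one issued, so opaque stateid values keep "increasing" and a freed id is not reissued right away. A userspace sketch of the same allocation discipline without the idr; the tiny id space and toy_* names are purely illustrative:

#include <stdio.h>

#define TOY_IDS 8	/* tiny id space for the demo */

static int in_use[TOY_IDS];
static int min_id;	/* plays the role of min_stateid above */

/* Pick the smallest free id >= min_id (wrapping once), roughly what
 * idr_get_new_above() does; then advance min_id so freshly freed ids
 * are not reissued immediately. */
static int toy_get_new_stid(void)
{
	for (int pass = 0; pass < 2; pass++) {
		int start = pass ? 0 : min_id;
		for (int id = start; id < TOY_IDS; id++) {
			if (in_use[id])
				continue;
			in_use[id] = 1;
			min_id = (id + 1 == TOY_IDS) ? 0 : id + 1;
			return id;
		}
	}
	return -1;	/* id space exhausted */
}

static void toy_free_stid(int id)
{
	in_use[id] = 0;	/* like idr_remove() in unhash_stid() above */
}

int main(void)
{
	int a = toy_get_new_stid();	/* 0 */
	int b = toy_get_new_stid();	/* 1 */

	toy_free_stid(a);
	/* Even though id 0 is free again, the next id keeps increasing: */
	printf("%d %d %d\n", a, b, toy_get_new_stid());	/* 0 1 2 */
	return 0;
}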
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_fh *current_fh, u32 type)
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
{
struct nfs4_delegation *dp;
struct nfs4_file *fp = stp->st_file;
......@@ -224,21 +291,23 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
return NULL;
if (num_delegations > max_delegations)
return NULL;
dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
if (dp == NULL)
return dp;
init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
/*
* delegation seqid's are never incremented. The 4.1 special
* meaning of seqid 0 isn't meaningful, really, but let's avoid
* 0 anyway just for consistency and use 1:
*/
dp->dl_stid.sc_stateid.si_generation = 1;
num_delegations++;
INIT_LIST_HEAD(&dp->dl_perfile);
INIT_LIST_HEAD(&dp->dl_perclnt);
INIT_LIST_HEAD(&dp->dl_recall_lru);
dp->dl_client = clp;
get_nfs4_file(fp);
dp->dl_file = fp;
dp->dl_type = type;
dp->dl_stateid.si_boot = boot_time;
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
dp->dl_stateid.si_generation = 0;
fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
dp->dl_time = 0;
atomic_set(&dp->dl_count, 1);
......@@ -267,10 +336,18 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
}
}
static void unhash_stid(struct nfs4_stid *s)
{
struct idr *stateids = &s->sc_client->cl_stateids;
idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}
/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
unhash_stid(&dp->dl_stid);
list_del_init(&dp->dl_perclnt);
spin_lock(&recall_lock);
list_del_init(&dp->dl_perfile);
......@@ -292,10 +369,16 @@ static DEFINE_SPINLOCK(client_lock);
#define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1)
#define clientid_hashval(id) \
((id) & CLIENT_HASH_MASK)
#define clientstr_hashval(name) \
(opaque_hashval((name), 8) & CLIENT_HASH_MASK)
static unsigned int clientid_hashval(u32 id)
{
return id & CLIENT_HASH_MASK;
}
static unsigned int clientstr_hashval(const char *name)
{
return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
/*
* reclaim_str_hashtbl[] holds known client info from previous reset/reboot
* used in reboot/reset lease grace period processing
......@@ -362,7 +445,7 @@ set_deny(unsigned int *deny, unsigned long bmap) {
}
static int
test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
unsigned int access, deny;
set_access(&access, stp->st_access_bmap);
......@@ -385,14 +468,13 @@ static int nfs4_access_to_omode(u32 access)
BUG();
}
static void unhash_generic_stateid(struct nfs4_stateid *stp)
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
list_del(&stp->st_hash);
list_del(&stp->st_perfile);
list_del(&stp->st_perstateowner);
}
static void free_generic_stateid(struct nfs4_stateid *stp)
static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
int i;
......@@ -401,84 +483,106 @@ static void free_generic_stateid(struct nfs4_stateid *stp)
if (test_bit(i, &stp->st_access_bmap))
nfs4_file_put_access(stp->st_file,
nfs4_access_to_omode(i));
__clear_bit(i, &stp->st_access_bmap);
}
}
put_nfs4_file(stp->st_file);
stp->st_file = NULL;
}
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
kmem_cache_free(stateid_slab, stp);
}
static void release_lock_stateid(struct nfs4_stateid *stp)
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
struct file *file;
unhash_generic_stateid(stp);
unhash_stid(&stp->st_stid);
file = find_any_file(stp->st_file);
if (file)
locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
close_generic_stateid(stp);
free_generic_stateid(stp);
}
static void unhash_lockowner(struct nfs4_stateowner *sop)
static void unhash_lockowner(struct nfs4_lockowner *lo)
{
struct nfs4_stateid *stp;
struct nfs4_ol_stateid *stp;
list_del(&sop->so_idhash);
list_del(&sop->so_strhash);
list_del(&sop->so_perstateid);
while (!list_empty(&sop->so_stateids)) {
stp = list_first_entry(&sop->so_stateids,
struct nfs4_stateid, st_perstateowner);
list_del(&lo->lo_owner.so_strhash);
list_del(&lo->lo_perstateid);
while (!list_empty(&lo->lo_owner.so_stateids)) {
stp = list_first_entry(&lo->lo_owner.so_stateids,
struct nfs4_ol_stateid, st_perstateowner);
release_lock_stateid(stp);
}
}
static void release_lockowner(struct nfs4_stateowner *sop)
static void release_lockowner(struct nfs4_lockowner *lo)
{
unhash_lockowner(sop);
nfs4_put_stateowner(sop);
unhash_lockowner(lo);
nfs4_free_lockowner(lo);
}
static void
release_stateid_lockowners(struct nfs4_stateid *open_stp)
release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
{
struct nfs4_stateowner *lock_sop;
struct nfs4_lockowner *lo;
while (!list_empty(&open_stp->st_lockowners)) {
lock_sop = list_entry(open_stp->st_lockowners.next,
struct nfs4_stateowner, so_perstateid);
/* list_del(&open_stp->st_lockowners); */
BUG_ON(lock_sop->so_is_open_owner);
release_lockowner(lock_sop);
lo = list_entry(open_stp->st_lockowners.next,
struct nfs4_lockowner, lo_perstateid);
release_lockowner(lo);
}
}
static void release_open_stateid(struct nfs4_stateid *stp)
static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
unhash_generic_stateid(stp);
release_stateid_lockowners(stp);
close_generic_stateid(stp);
}
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
unhash_open_stateid(stp);
unhash_stid(&stp->st_stid);
free_generic_stateid(stp);
}
static void unhash_openowner(struct nfs4_stateowner *sop)
static void unhash_openowner(struct nfs4_openowner *oo)
{
struct nfs4_stateid *stp;
struct nfs4_ol_stateid *stp;
list_del(&sop->so_idhash);
list_del(&sop->so_strhash);
list_del(&sop->so_perclient);
list_del(&sop->so_perstateid); /* XXX: necessary? */
while (!list_empty(&sop->so_stateids)) {
stp = list_first_entry(&sop->so_stateids,
struct nfs4_stateid, st_perstateowner);
list_del(&oo->oo_owner.so_strhash);
list_del(&oo->oo_perclient);
while (!list_empty(&oo->oo_owner.so_stateids)) {
stp = list_first_entry(&oo->oo_owner.so_stateids,
struct nfs4_ol_stateid, st_perstateowner);
release_open_stateid(stp);
}
}
static void release_openowner(struct nfs4_stateowner *sop)
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
if (s) {
unhash_stid(&s->st_stid);
free_generic_stateid(s);
oo->oo_last_closed_stid = NULL;
}
}
static void release_openowner(struct nfs4_openowner *oo)
{
unhash_openowner(sop);
list_del(&sop->so_close_lru);
nfs4_put_stateowner(sop);
unhash_openowner(oo);
list_del(&oo->oo_close_lru);
release_last_closed_stateid(oo);
nfs4_free_openowner(oo);
}
#define SESSION_HASH_SIZE 512
......@@ -843,9 +947,6 @@ renew_client_locked(struct nfs4_client *clp)
return;
}
/*
* Move client to the end of the LRU list.
*/
dprintk("renewing client (clientid %08x/%08x)\n",
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
......@@ -943,7 +1044,7 @@ unhash_client_locked(struct nfs4_client *clp)
static void
expire_client(struct nfs4_client *clp)
{
struct nfs4_stateowner *sop;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head reaplist;
......@@ -961,8 +1062,8 @@ expire_client(struct nfs4_client *clp)
unhash_delegation(dp);
}
while (!list_empty(&clp->cl_openowners)) {
sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
release_openowner(sop);
oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
release_openowner(oo);
}
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
......@@ -1038,6 +1139,23 @@ static void gen_confirm(struct nfs4_client *clp)
*p++ = i++;
}
static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
{
return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
}
static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
struct nfs4_stid *s;
s = find_stateid(cl, t);
if (!s)
return NULL;
if (typemask & s->sc_type)
return s;
return NULL;
}
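/*
 * Illustrative userspace sketch (not part of this patch): models the new
 * per-client stateid lookup.  The real code keys an idr in struct nfs4_client
 * (cl_stateids) by si_opaque.so_id and then filters on sc_type; a flat array
 * stands in for the idr here, and the DEMO_* constants are placeholders for
 * the NFS4_*_STID type bits.
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_OPEN_STID  0x01
#define DEMO_LOCK_STID  0x02
#define DEMO_DELEG_STID 0x04

struct demo_stid {
	unsigned int  id;	/* plays the role of si_opaque.so_id */
	unsigned char type;	/* plays the role of sc_type         */
};

static struct demo_stid table[] = {
	{ 17, DEMO_OPEN_STID },
	{ 18, DEMO_DELEG_STID },
};

static struct demo_stid *demo_find_stateid(unsigned int id)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].id == id)
			return &table[i];
	return NULL;
}

static struct demo_stid *demo_find_stateid_by_type(unsigned int id, unsigned char mask)
{
	struct demo_stid *s = demo_find_stateid(id);

	/* same filter as find_stateid_by_type(): id hit plus a type that intersects the mask */
	return (s && (s->type & mask)) ? s : NULL;
}

int main(void)
{
	printf("open/lock lookup of 18: %s\n",
	       demo_find_stateid_by_type(18, DEMO_OPEN_STID | DEMO_LOCK_STID) ? "hit" : "miss");
	printf("delegation lookup of 18: %s\n",
	       demo_find_stateid_by_type(18, DEMO_DELEG_STID) ? "hit" : "miss");
	return 0;
}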
static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
......@@ -1060,6 +1178,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
}
}
idr_init(&clp->cl_stateids);
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
atomic_set(&clp->cl_refcount, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
......@@ -1083,17 +1202,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
return clp;
}
static int check_name(struct xdr_netobj name)
{
if (name.len == 0)
return 0;
if (name.len > NFS4_OPAQUE_LIMIT) {
dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
return 0;
}
return 1;
}
static void
add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
{
......@@ -1125,9 +1233,11 @@ find_confirmed_client(clientid_t *clid)
unsigned int idhashval = clientid_hashval(clid->cl_id);
list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
if (same_clid(&clp->cl_clientid, clid))
if (same_clid(&clp->cl_clientid, clid)) {
renew_client(clp);
return clp;
}
}
return NULL;
}
......@@ -1173,20 +1283,6 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
return NULL;
}
static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr)
{
switch (family) {
case AF_INET:
((struct sockaddr_in *)sa)->sin_family = AF_INET;
((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr;
return;
case AF_INET6:
((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6;
((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6;
return;
}
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
......@@ -1218,7 +1314,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r
conn->cb_prog = se->se_callback_prog;
conn->cb_ident = se->se_callback_ident;
rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr);
memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
return;
out_err:
conn->cb_addr.ss_family = AF_UNSPEC;
......@@ -1350,7 +1446,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
__func__, rqstp, exid, exid->clname.len, exid->clname.data,
addr_str, exid->flags, exid->spa_how);
if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
return nfserr_inval;
/* Currently only support SP4_NONE */
......@@ -1849,8 +1945,16 @@ nfsd4_sequence(struct svc_rqst *rqstp,
nfsd4_get_session(cstate->session);
atomic_inc(&clp->cl_refcount);
if (clp->cl_cb_state == NFSD4_CB_DOWN)
seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
switch (clp->cl_cb_state) {
case NFSD4_CB_DOWN:
seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
break;
case NFSD4_CB_FAULT:
seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
break;
default:
seq->status_flags = 0;
}
}
kfree(conn);
spin_unlock(&client_lock);
......@@ -1858,6 +1962,50 @@ nfsd4_sequence(struct svc_rqst *rqstp,
return status;
}
static inline bool has_resources(struct nfs4_client *clp)
{
return !list_empty(&clp->cl_openowners)
|| !list_empty(&clp->cl_delegations)
|| !list_empty(&clp->cl_sessions);
}
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
struct nfs4_client *conf, *unconf, *clp;
__be32 status = 0;
nfs4_lock_state();
unconf = find_unconfirmed_client(&dc->clientid);
conf = find_confirmed_client(&dc->clientid);
if (conf) {
clp = conf;
if (!is_client_expired(conf) && has_resources(conf)) {
status = nfserr_clientid_busy;
goto out;
}
/* rfc5661 18.50.3 */
if (cstate->session && conf == cstate->session->se_client) {
status = nfserr_clientid_busy;
goto out;
}
} else if (unconf)
clp = unconf;
else {
status = nfserr_stale_clientid;
goto out;
}
expire_client(clp);
out:
nfs4_unlock_state();
dprintk("%s return %d\n", __func__, ntohl(status));
return status;
}
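/*
 * Illustrative userspace sketch (not kernel code): the DESTROY_CLIENTID logic
 * above reduces to a small decision table -- a confirmed client that still
 * holds openowners, delegations or sessions (and has not already expired), or
 * that owns the session the request arrived on, is "busy"; an unconfirmed
 * client is simply expired; an unknown clientid is stale.
 */
#include <stdio.h>

enum demo_result { DEMO_DESTROYED, DEMO_CLIENTID_BUSY, DEMO_STALE_CLIENTID };

static enum demo_result demo_destroy_clientid(int confirmed, int unconfirmed,
					      int has_resources, int is_current_session)
{
	if (confirmed) {
		if (has_resources || is_current_session)
			return DEMO_CLIENTID_BUSY;
		return DEMO_DESTROYED;
	}
	if (unconfirmed)
		return DEMO_DESTROYED;
	return DEMO_STALE_CLIENTID;
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_destroy_clientid(1, 0, 1, 0),	/* busy: confirmed with resources */
	       demo_destroy_clientid(0, 1, 0, 0),	/* destroyed: unconfirmed client  */
	       demo_destroy_clientid(0, 0, 0, 0));	/* stale: clientid not found      */
	return 0;
}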
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
......@@ -1900,19 +2048,13 @@ __be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid *setclid)
{
struct xdr_netobj clname = {
.len = setclid->se_namelen,
.data = setclid->se_name,
};
struct xdr_netobj clname = setclid->se_name;
nfs4_verifier clverifier = setclid->se_verf;
unsigned int strhashval;
struct nfs4_client *conf, *unconf, *new;
__be32 status;
char dname[HEXDIR_LEN];
if (!check_name(clname))
return nfserr_inval;
status = nfs4_make_rec_clidname(dname, &clname);
if (status)
return status;
......@@ -1946,7 +2088,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* of 5 bullet points, labeled as CASE0 - CASE4 below.
*/
unconf = find_unconfirmed_client_by_str(dname, strhashval);
status = nfserr_resource;
status = nfserr_jukebox;
if (!conf) {
/*
* RFC 3530 14.2.33 CASE 4:
......@@ -2116,21 +2258,21 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
return kmem_cache_alloc(file_slab, GFP_KERNEL);
}
/* OPEN Share state helper functions */
static inline struct nfs4_file *
alloc_init_file(struct inode *ino)
static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
{
struct nfs4_file *fp;
unsigned int hashval = file_hashval(ino);
fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
if (fp) {
atomic_set(&fp->fi_ref, 1);
INIT_LIST_HEAD(&fp->fi_hash);
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
fp->fi_inode = igrab(ino);
fp->fi_id = current_fileid++;
fp->fi_had_conflict = false;
fp->fi_lease = NULL;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
......@@ -2138,9 +2280,6 @@ alloc_init_file(struct inode *ino)
spin_lock(&recall_lock);
list_add(&fp->fi_hash, &file_hashtbl[hashval]);
spin_unlock(&recall_lock);
return fp;
}
return NULL;
}
static void
......@@ -2155,7 +2294,8 @@ nfsd4_free_slab(struct kmem_cache **slab)
void
nfsd4_free_slabs(void)
{
nfsd4_free_slab(&stateowner_slab);
nfsd4_free_slab(&openowner_slab);
nfsd4_free_slab(&lockowner_slab);
nfsd4_free_slab(&file_slab);
nfsd4_free_slab(&stateid_slab);
nfsd4_free_slab(&deleg_slab);
......@@ -2164,16 +2304,20 @@ nfsd4_free_slabs(void)
static int
nfsd4_init_slabs(void)
{
stateowner_slab = kmem_cache_create("nfsd4_stateowners",
sizeof(struct nfs4_stateowner), 0, 0, NULL);
if (stateowner_slab == NULL)
openowner_slab = kmem_cache_create("nfsd4_openowners",
sizeof(struct nfs4_openowner), 0, 0, NULL);
if (openowner_slab == NULL)
goto out_nomem;
lockowner_slab = kmem_cache_create("nfsd4_lockowners",
sizeof(struct nfs4_lockowner), 0, 0, NULL);
if (lockowner_slab == NULL)
goto out_nomem;
file_slab = kmem_cache_create("nfsd4_files",
sizeof(struct nfs4_file), 0, 0, NULL);
if (file_slab == NULL)
goto out_nomem;
stateid_slab = kmem_cache_create("nfsd4_stateids",
sizeof(struct nfs4_stateid), 0, 0, NULL);
sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
if (stateid_slab == NULL)
goto out_nomem;
deleg_slab = kmem_cache_create("nfsd4_delegations",
......@@ -2187,97 +2331,94 @@ nfsd4_init_slabs(void)
return -ENOMEM;
}
void
nfs4_free_stateowner(struct kref *kref)
void nfs4_free_openowner(struct nfs4_openowner *oo)
{
struct nfs4_stateowner *sop =
container_of(kref, struct nfs4_stateowner, so_ref);
kfree(sop->so_owner.data);
kmem_cache_free(stateowner_slab, sop);
kfree(oo->oo_owner.so_owner.data);
kmem_cache_free(openowner_slab, oo);
}
static inline struct nfs4_stateowner *
alloc_stateowner(struct xdr_netobj *owner)
void nfs4_free_lockowner(struct nfs4_lockowner *lo)
{
struct nfs4_stateowner *sop;
kfree(lo->lo_owner.so_owner.data);
kmem_cache_free(lockowner_slab, lo);
}
if ((sop = kmem_cache_alloc(stateowner_slab, GFP_KERNEL))) {
if ((sop->so_owner.data = kmalloc(owner->len, GFP_KERNEL))) {
memcpy(sop->so_owner.data, owner->data, owner->len);
sop->so_owner.len = owner->len;
kref_init(&sop->so_ref);
return sop;
}
kmem_cache_free(stateowner_slab, sop);
}
return NULL;
static void init_nfs4_replay(struct nfs4_replay *rp)
{
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
}
static struct nfs4_stateowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
struct nfs4_stateowner *sop;
struct nfs4_replay *rp;
unsigned int idhashval;
if (!(sop = alloc_stateowner(&open->op_owner)))
sop = kmem_cache_alloc(slab, GFP_KERNEL);
if (!sop)
return NULL;
sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
if (!sop->so_owner.data) {
kmem_cache_free(slab, sop);
return NULL;
idhashval = ownerid_hashval(current_ownerid);
INIT_LIST_HEAD(&sop->so_idhash);
INIT_LIST_HEAD(&sop->so_strhash);
INIT_LIST_HEAD(&sop->so_perclient);
}
sop->so_owner.len = owner->len;
INIT_LIST_HEAD(&sop->so_stateids);
INIT_LIST_HEAD(&sop->so_perstateid); /* not used */
INIT_LIST_HEAD(&sop->so_close_lru);
sop->so_time = 0;
list_add(&sop->so_idhash, &ownerid_hashtbl[idhashval]);
list_add(&sop->so_strhash, &ownerstr_hashtbl[strhashval]);
list_add(&sop->so_perclient, &clp->cl_openowners);
sop->so_is_open_owner = 1;
sop->so_id = current_ownerid++;
sop->so_client = clp;
sop->so_seqid = open->op_seqid;
sop->so_confirmed = 0;
rp = &sop->so_replay;
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
init_nfs4_replay(&sop->so_replay);
return sop;
}
static inline void
init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
struct nfs4_stateowner *sop = open->op_stateowner;
unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
list_add(&oo->oo_owner.so_strhash, &open_ownerstr_hashtbl[strhashval]);
list_add(&oo->oo_perclient, &clp->cl_openowners);
}
INIT_LIST_HEAD(&stp->st_hash);
INIT_LIST_HEAD(&stp->st_perstateowner);
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
struct nfs4_openowner *oo;
oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
if (!oo)
return NULL;
oo->oo_owner.so_is_open_owner = 1;
oo->oo_owner.so_seqid = open->op_seqid;
oo->oo_flags = NFS4_OO_NEW;
oo->oo_time = 0;
oo->oo_last_closed_stid = NULL;
INIT_LIST_HEAD(&oo->oo_close_lru);
hash_openowner(oo, clp, strhashval);
return oo;
}
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
struct nfs4_openowner *oo = open->op_openowner;
struct nfs4_client *clp = oo->oo_owner.so_client;
init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
INIT_LIST_HEAD(&stp->st_lockowners);
INIT_LIST_HEAD(&stp->st_perfile);
list_add(&stp->st_hash, &stateid_hashtbl[hashval]);
list_add(&stp->st_perstateowner, &sop->so_stateids);
list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
stp->st_stateowner = sop;
stp->st_stateowner = &oo->oo_owner;
get_nfs4_file(fp);
stp->st_file = fp;
stp->st_stateid.si_boot = boot_time;
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
stp->st_access_bmap = 0;
stp->st_deny_bmap = 0;
__set_bit(open->op_share_access & ~NFS4_SHARE_WANT_MASK,
&stp->st_access_bmap);
__set_bit(open->op_share_access, &stp->st_access_bmap);
__set_bit(open->op_share_deny, &stp->st_deny_bmap);
stp->st_openstp = NULL;
}
static void
move_to_close_lru(struct nfs4_stateowner *sop)
move_to_close_lru(struct nfs4_openowner *oo)
{
dprintk("NFSD: move_to_close_lru nfs4_stateowner %p\n", sop);
dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
list_move_tail(&sop->so_close_lru, &close_lru);
sop->so_time = get_seconds();
list_move_tail(&oo->oo_close_lru, &close_lru);
oo->oo_time = get_seconds();
}
static int
......@@ -2289,14 +2430,18 @@ same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
(sop->so_client->cl_clientid.cl_id == clid->cl_id);
}
static struct nfs4_stateowner *
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
{
struct nfs4_stateowner *so = NULL;
struct nfs4_stateowner *so;
struct nfs4_openowner *oo;
list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
if (same_owner_str(so, &open->op_owner, &open->op_clientid))
return so;
list_for_each_entry(so, &open_ownerstr_hashtbl[hashval], so_strhash) {
if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
oo = openowner(so);
renew_client(oo->oo_owner.so_client);
return oo;
}
}
return NULL;
}
......@@ -2320,31 +2465,6 @@ find_file(struct inode *ino)
return NULL;
}
static inline int access_valid(u32 x, u32 minorversion)
{
if ((x & NFS4_SHARE_ACCESS_MASK) < NFS4_SHARE_ACCESS_READ)
return 0;
if ((x & NFS4_SHARE_ACCESS_MASK) > NFS4_SHARE_ACCESS_BOTH)
return 0;
x &= ~NFS4_SHARE_ACCESS_MASK;
if (minorversion && x) {
if ((x & NFS4_SHARE_WANT_MASK) > NFS4_SHARE_WANT_CANCEL)
return 0;
if ((x & NFS4_SHARE_WHEN_MASK) > NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED)
return 0;
x &= ~(NFS4_SHARE_WANT_MASK | NFS4_SHARE_WHEN_MASK);
}
if (x)
return 0;
return 1;
}
static inline int deny_valid(u32 x)
{
/* Note: unlike access bits, deny bits may be zero. */
return x <= NFS4_SHARE_DENY_BOTH;
}
/*
* Called to check deny when READ with all zero stateid or
* WRITE with all zero or all one stateid
......@@ -2354,7 +2474,7 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
struct inode *ino = current_fh->fh_dentry->d_inode;
struct nfs4_file *fp;
struct nfs4_stateid *stp;
struct nfs4_ol_stateid *stp;
__be32 ret;
dprintk("NFSD: nfs4_share_conflict\n");
......@@ -2429,6 +2549,16 @@ static const struct lock_manager_operations nfsd_lease_mng_ops = {
.lm_change = nfsd_change_deleg_cb,
};
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
if (nfsd4_has_session(cstate))
return nfs_ok;
if (seqid == so->so_seqid - 1)
return nfserr_replay_me;
if (seqid == so->so_seqid)
return nfs_ok;
return nfserr_bad_seqid;
}
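/*
 * Illustrative userspace sketch (not kernel code): nfsd4_check_seqid()
 * collapses the old open/lock seqid checks into one rule.  With a session the
 * check is skipped; otherwise the current seqid is accepted, the previous
 * seqid is treated as a retransmission (nfserr_replay_me), and anything else
 * is a bad seqid.
 */
#include <stdio.h>

enum demo_seqid { DEMO_OK, DEMO_REPLAY, DEMO_BAD_SEQID };

static enum demo_seqid demo_check_seqid(int has_session,
					unsigned int so_seqid, unsigned int seqid)
{
	if (has_session)
		return DEMO_OK;		/* sessions make seqids irrelevant  */
	if (seqid == so_seqid - 1)
		return DEMO_REPLAY;	/* client retransmitted its last op */
	if (seqid == so_seqid)
		return DEMO_OK;		/* the next operation in sequence   */
	return DEMO_BAD_SEQID;
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_check_seqid(0, 5, 5),	/* 0: OK     */
	       demo_check_seqid(0, 5, 4),	/* 1: REPLAY */
	       demo_check_seqid(0, 5, 7));	/* 2: BAD    */
	return 0;
}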
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
......@@ -2437,57 +2567,49 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
clientid_t *clientid = &open->op_clientid;
struct nfs4_client *clp = NULL;
unsigned int strhashval;
struct nfs4_stateowner *sop = NULL;
if (!check_name(open->op_owner))
return nfserr_inval;
struct nfs4_openowner *oo = NULL;
__be32 status;
if (STALE_CLIENTID(&open->op_clientid))
return nfserr_stale_clientid;
/*
* In case we need it later, after we've already created the
* file and don't want to risk a further failure:
*/
open->op_file = nfsd4_alloc_file();
if (open->op_file == NULL)
return nfserr_jukebox;
strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner);
sop = find_openstateowner_str(strhashval, open);
open->op_stateowner = sop;
if (!sop) {
/* Make sure the client's lease hasn't expired. */
strhashval = open_ownerstr_hashval(clientid->cl_id, &open->op_owner);
oo = find_openstateowner_str(strhashval, open);
open->op_openowner = oo;
if (!oo) {
clp = find_confirmed_client(clientid);
if (clp == NULL)
return nfserr_expired;
goto renew;
goto new_owner;
}
/* When sessions are used, skip open sequenceid processing */
if (nfsd4_has_session(cstate))
goto renew;
if (!sop->so_confirmed) {
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
/* Replace unconfirmed owners without checking for replay. */
clp = sop->so_client;
release_openowner(sop);
open->op_stateowner = NULL;
goto renew;
}
if (open->op_seqid == sop->so_seqid - 1) {
if (sop->so_replay.rp_buflen)
return nfserr_replay_me;
/* The original OPEN failed so spectacularly
* that we don't even have replay data saved!
* Therefore, we have no choice but to continue
* processing this OPEN; presumably, we'll
* fail again for the same reason.
*/
dprintk("nfsd4_process_open1: replay with no replay cache\n");
goto renew;
clp = oo->oo_owner.so_client;
release_openowner(oo);
open->op_openowner = NULL;
goto new_owner;
}
if (open->op_seqid != sop->so_seqid)
return nfserr_bad_seqid;
renew:
if (open->op_stateowner == NULL) {
sop = alloc_init_open_stateowner(strhashval, clp, open);
if (sop == NULL)
return nfserr_resource;
open->op_stateowner = sop;
}
list_del_init(&sop->so_close_lru);
renew_client(sop->so_client);
status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
if (status)
return status;
clp = oo->oo_owner.so_client;
goto alloc_stateid;
new_owner:
oo = alloc_init_open_stateowner(strhashval, clp, open);
if (oo == NULL)
return nfserr_jukebox;
open->op_openowner = oo;
alloc_stateid:
open->op_stp = nfs4_alloc_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
return nfs_ok;
}
......@@ -2500,36 +2622,37 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
return nfs_ok;
}
static struct nfs4_delegation *
find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
static int share_access_to_flags(u32 share_access)
{
struct nfs4_delegation *dp;
share_access &= ~NFS4_SHARE_WANT_MASK;
spin_lock(&recall_lock);
list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) {
spin_unlock(&recall_lock);
return dp;
}
spin_unlock(&recall_lock);
return NULL;
return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}
static int share_access_to_flags(u32 share_access)
static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
share_access &= ~NFS4_SHARE_WANT_MASK;
struct nfs4_stid *ret;
return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
if (!ret)
return NULL;
return delegstateid(ret);
}
static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}
static __be32
nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
struct nfs4_delegation **dp)
{
int flags;
__be32 status = nfserr_bad_stateid;
*dp = find_delegation_file(fp, &open->op_delegate_stateid);
*dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
if (*dp == NULL)
goto out;
flags = share_access_to_flags(open->op_share_access);
......@@ -2537,41 +2660,37 @@ nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
if (status)
*dp = NULL;
out:
if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR)
if (!nfsd4_is_deleg_cur(open))
return nfs_ok;
if (status)
return status;
open->op_stateowner->so_confirmed = 1;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
return nfs_ok;
}
static __be32
nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_stateid **stpp)
nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
{
struct nfs4_stateid *local;
__be32 status = nfserr_share_denied;
struct nfs4_stateowner *sop = open->op_stateowner;
struct nfs4_ol_stateid *local;
struct nfs4_openowner *oo = open->op_openowner;
list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
/* remember if we have seen this open owner */
if (local->st_stateowner == sop)
if (local->st_stateowner == &oo->oo_owner)
*stpp = local;
/* check for conflicting share reservations */
if (!test_share(local, open))
goto out;
return nfserr_share_denied;
}
status = 0;
out:
return status;
return nfs_ok;
}
static inline struct nfs4_stateid *
nfs4_alloc_stateid(void)
static void nfs4_free_stateid(struct nfs4_ol_stateid *s)
{
return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
kmem_cache_free(stateid_slab, s);
}
static inline int nfs4_access_to_access(u32 nfs4_access)
......@@ -2592,12 +2711,6 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
int oflag = nfs4_access_to_omode(open->op_share_access);
int access = nfs4_access_to_access(open->op_share_access);
/* CLAIM_DELEGATE_CUR is used in response to a broken lease;
* allowing it to break the lease and return EAGAIN leaves the
* client unable to make progress in returning the delegation */
if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
access |= NFSD_MAY_NOT_BREAK_LEASE;
if (!fp->fi_fds[oflag]) {
status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
&fp->fi_fds[oflag]);
......@@ -2609,27 +2722,6 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
return nfs_ok;
}
static __be32
nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp,
struct nfs4_file *fp, struct svc_fh *cur_fh,
struct nfsd4_open *open)
{
struct nfs4_stateid *stp;
__be32 status;
stp = nfs4_alloc_stateid();
if (stp == NULL)
return nfserr_resource;
status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
if (status) {
kmem_cache_free(stateid_slab, stp);
return status;
}
*stpp = stp;
return 0;
}
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
struct nfsd4_open *open)
......@@ -2646,9 +2738,9 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
}
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
u32 op_share_access = open->op_share_access;
bool new_access;
__be32 status;
......@@ -2677,8 +2769,8 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
static void
nfs4_set_claim_prev(struct nfsd4_open *open)
{
open->op_stateowner->so_confirmed = 1;
open->op_stateowner->so_client->cl_firststate = 1;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
open->op_openowner->oo_owner.so_client->cl_firststate = 1;
}
/* Should we give out recallable state?: */
......@@ -2721,7 +2813,7 @@ static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
if (!fl)
return -ENOMEM;
fl->fl_file = find_readable_file(fp);
list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
if (status) {
list_del_init(&dp->dl_perclnt);
......@@ -2750,7 +2842,7 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
atomic_inc(&fp->fi_delegees);
list_add(&dp->dl_perfile, &fp->fi_delegations);
spin_unlock(&recall_lock);
list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
return 0;
}
......@@ -2758,14 +2850,14 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
* Attempt to hand out a delegation.
*/
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_stateid *stp)
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
{
struct nfs4_delegation *dp;
struct nfs4_stateowner *sop = stp->st_stateowner;
struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
int cb_up;
int status, flag = 0;
cb_up = nfsd4_cb_channel_good(sop->so_client);
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
flag = NFS4_OPEN_DELEGATE_NONE;
open->op_recall = 0;
switch (open->op_claim_type) {
......@@ -2781,7 +2873,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
* had the chance to reclaim theirs.... */
if (locks_in_grace())
goto out;
if (!cb_up || !sop->so_confirmed)
if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
flag = NFS4_OPEN_DELEGATE_WRITE;
......@@ -2792,17 +2884,17 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
goto out;
}
dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
if (dp == NULL)
goto out_no_deleg;
status = nfs4_set_delegation(dp, flag);
if (status)
goto out_free;
memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
STATEID_VAL(&dp->dl_stateid));
STATEID_VAL(&dp->dl_stid.sc_stateid));
out:
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
&& flag == NFS4_OPEN_DELEGATE_NONE
......@@ -2824,16 +2916,13 @@ __be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
struct nfs4_file *fp = NULL;
struct inode *ino = current_fh->fh_dentry->d_inode;
struct nfs4_stateid *stp = NULL;
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
status = nfserr_inval;
if (!access_valid(open->op_share_access, resp->cstate.minorversion)
|| !deny_valid(open->op_share_deny))
goto out;
/*
* Lookup file; if found, lookup stateid and check open request,
* and check for delegations in the process of being recalled.
......@@ -2843,17 +2932,17 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
if (fp) {
if ((status = nfs4_check_open(fp, open, &stp)))
goto out;
status = nfs4_check_deleg(fp, open, &dp);
status = nfs4_check_deleg(cl, fp, open, &dp);
if (status)
goto out;
} else {
status = nfserr_bad_stateid;
if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
goto out;
status = nfserr_resource;
fp = alloc_init_file(ino);
if (fp == NULL)
if (nfsd4_is_deleg_cur(open))
goto out;
status = nfserr_jukebox;
fp = open->op_file;
open->op_file = NULL;
nfsd4_init_file(fp, ino);
}
/*
......@@ -2865,24 +2954,24 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status)
goto out;
update_stateid(&stp->st_stateid);
} else {
status = nfs4_new_open(rqstp, &stp, fp, current_fh, open);
status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
if (status)
goto out;
init_stateid(stp, fp, open);
stp = open->op_stp;
open->op_stp = NULL;
init_open_stateid(stp, fp, open);
status = nfsd4_truncate(rqstp, current_fh, open);
if (status) {
release_open_stateid(stp);
goto out;
}
if (nfsd4_has_session(&resp->cstate))
update_stateid(&stp->st_stateid);
}
memcpy(&open->op_stateid, &stp->st_stateid, sizeof(stateid_t));
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
if (nfsd4_has_session(&resp->cstate))
open->op_stateowner->so_confirmed = 1;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
/*
* Attempt to hand out a delegation. No error return, because the
......@@ -2893,7 +2982,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs_ok;
dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
STATEID_VAL(&stp->st_stateid));
STATEID_VAL(&stp->st_stid.sc_stateid));
out:
if (fp)
put_nfs4_file(fp);
......@@ -2903,13 +2992,34 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
* To finish the open response, we just need to set the rflags.
*/
open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
if (!open->op_stateowner->so_confirmed &&
if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
!nfsd4_has_session(&resp->cstate))
open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
{
if (open->op_openowner) {
struct nfs4_openowner *oo = open->op_openowner;
if (!list_empty(&oo->oo_owner.so_stateids))
list_del_init(&oo->oo_close_lru);
if (oo->oo_flags & NFS4_OO_NEW) {
if (status) {
release_openowner(oo);
open->op_openowner = NULL;
} else
oo->oo_flags &= ~NFS4_OO_NEW;
}
}
if (open->op_file)
nfsd4_free_file(open->op_file);
if (open->op_stp)
nfs4_free_stateid(open->op_stp);
}
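/*
 * Illustrative userspace sketch (not kernel code): process_open1() now
 * preallocates the nfs4_file (open->op_file) and the open stateid
 * (open->op_stp), process_open2() consumes them by clearing the pointers, and
 * nfsd4_cleanup_open_state() above frees only what was left over.  The model
 * shows that hand-off convention.
 */
#include <stdlib.h>

struct demo_open {
	void *op_file;	/* preallocated; may or may not be consumed */
	void *op_stp;	/* preallocated; may or may not be consumed */
};

static void demo_prepare(struct demo_open *open)
{
	open->op_file = malloc(64);
	open->op_stp = malloc(64);
}

static void *demo_consume(void **slot)
{
	void *p = *slot;

	*slot = NULL;	/* ownership moves away, so cleanup skips it */
	return p;
}

static void demo_cleanup(struct demo_open *open)
{
	free(open->op_file);	/* free(NULL) is a no-op */
	free(open->op_stp);
}

int main(void)
{
	struct demo_open open;
	void *fp;

	demo_prepare(&open);
	fp = demo_consume(&open.op_file);	/* e.g. no nfs4_file was hashed yet */
	demo_cleanup(&open);			/* frees only the unused stateid    */
	free(fp);
	return 0;
}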
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
clientid_t *clid)
......@@ -2930,7 +3040,6 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("nfsd4_renew: clientid not found!\n");
goto out;
}
renew_client(clp);
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
&& clp->cl_cb_state != NFSD4_CB_UP)
......@@ -2962,7 +3071,7 @@ static time_t
nfs4_laundromat(void)
{
struct nfs4_client *clp;
struct nfs4_stateowner *sop;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head *pos, *next, reaplist;
time_t cutoff = get_seconds() - nfsd4_lease;
......@@ -3019,16 +3128,14 @@ nfs4_laundromat(void)
}
test_val = nfsd4_lease;
list_for_each_safe(pos, next, &close_lru) {
sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) {
u = sop->so_time - cutoff;
oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
u = oo->oo_time - cutoff;
if (test_val > u)
test_val = u;
break;
}
dprintk("NFSD: purging unused open stateowner (so_id %d)\n",
sop->so_id);
release_openowner(sop);
release_openowner(oo);
}
if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
......@@ -3050,30 +3157,17 @@ laundromat_main(struct work_struct *not_used)
queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
}
static struct nfs4_stateowner *
search_close_lru(u32 st_id, int flags)
{
struct nfs4_stateowner *local = NULL;
if (flags & CLOSE_STATE) {
list_for_each_entry(local, &close_lru, so_close_lru) {
if (local->so_id == st_id)
return local;
}
}
return NULL;
}
static inline int
nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
return fhp->fh_dentry->d_inode != stp->st_file->fi_inode;
if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
return nfserr_bad_stateid;
return nfs_ok;
}
static int
STALE_STATEID(stateid_t *stateid)
{
if (stateid->si_boot == boot_time)
if (stateid->si_opaque.so_clid.cl_boot == boot_time)
return 0;
dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
STATEID_VAL(stateid));
......@@ -3096,7 +3190,7 @@ access_permit_write(unsigned long access_bmap)
}
static
__be32 nfs4_check_openmode(struct nfs4_stateid *stp, int flags)
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
__be32 status = nfserr_openmode;
......@@ -3139,68 +3233,80 @@ grace_disallows_io(struct inode *inode)
return locks_in_grace() && mandatory_lock(inode);
}
static int check_stateid_generation(stateid_t *in, stateid_t *ref, int flags)
/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
return (s32)a->si_generation - (s32)b->si_generation > 0;
}
static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
/*
* When sessions are used the stateid generation number is ignored
* when it is zero.
*/
if ((flags & HAS_SESSION) && in->si_generation == 0)
goto out;
if (has_session && in->si_generation == 0)
return nfs_ok;
if (in->si_generation == ref->si_generation)
return nfs_ok;
/* If the client sends us a stateid from the future, it's buggy: */
if (in->si_generation > ref->si_generation)
if (stateid_generation_after(in, ref))
return nfserr_bad_stateid;
/*
* The following, however, can happen. For example, if the
* client sends an open and some IO at the same time, the open
* may bump si_generation while the IO is still in flight.
* Thanks to hard links and renames, the client never knows what
* file an open will affect. So it could avoid that situation
* only by serializing all opens and IO from the same open
* owner. To recover from the old_stateid error, the client
* will just have to retry the IO:
* However, we could see a stateid from the past, even from a
* non-buggy client. For example, if the client sends a lock
* while some IO is outstanding, the lock may bump si_generation
* while the IO is still in flight. The client could avoid that
* situation by waiting for responses on all the IO requests,
* but better performance may result in retrying IO that
* receives an old_stateid error if requests are rarely
* reordered in flight:
*/
if (in->si_generation < ref->si_generation)
return nfserr_old_stateid;
out:
return nfs_ok;
}
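/*
 * Illustrative userspace sketch (not kernel code): the "later than" test on
 * stateid generations is done on a signed 32-bit difference so it keeps
 * working after si_generation wraps past 2^32.  The patch casts each operand
 * to s32 before subtracting; computing the difference in unsigned arithmetic
 * and casting the result, as below, gives the same ordering without relying
 * on signed-overflow wrapping.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_generation_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", demo_generation_after(7, 5));		/* 1: 7 is later than 5        */
	printf("%d\n", demo_generation_after(2, 0xfffffffeu));	/* 1: later despite wraparound */
	printf("%d\n", demo_generation_after(0xfffffffeu, 2));	/* 0: and the reverse is not   */
	return 0;
}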
static int is_delegation_stateid(stateid_t *stateid)
__be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
return stateid->si_fileid == 0;
}
struct nfs4_stid *s;
struct nfs4_ol_stateid *ols;
__be32 status;
static int is_open_stateid(struct nfs4_stateid *stateid)
{
return stateid->st_openstp == NULL;
if (STALE_STATEID(stateid))
return nfserr_stale_stateid;
s = find_stateid(cl, stateid);
if (!s)
return nfserr_stale_stateid;
status = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (status)
return status;
if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
return nfs_ok;
ols = openlockstateid(s);
if (ols->st_stateowner->so_is_open_owner
&& !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
return nfserr_bad_stateid;
return nfs_ok;
}
__be32 nfs4_validate_stateid(stateid_t *stateid, int flags)
static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
{
struct nfs4_stateid *stp = NULL;
__be32 status = nfserr_stale_stateid;
struct nfs4_client *cl;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return nfserr_bad_stateid;
if (STALE_STATEID(stateid))
goto out;
status = nfserr_expired;
stp = search_for_stateid(stateid);
if (!stp)
goto out;
status = nfserr_bad_stateid;
if (!stp->st_stateowner->so_confirmed)
goto out;
status = check_stateid_generation(stateid, &stp->st_stateid, flags);
if (status)
goto out;
return nfserr_stale_stateid;
cl = find_confirmed_client(&stateid->si_opaque.so_clid);
if (!cl)
return nfserr_expired;
*s = find_stateid_by_type(cl, stateid, typemask);
if (!*s)
return nfserr_bad_stateid;
return nfs_ok;
status = nfs_ok;
out:
return status;
}
/*
......@@ -3210,7 +3316,8 @@ __be32
nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
stateid_t *stateid, int flags, struct file **filpp)
{
struct nfs4_stateid *stp = NULL;
struct nfs4_stid *s;
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
struct svc_fh *current_fh = &cstate->current_fh;
struct inode *ino = current_fh->fh_dentry->d_inode;
......@@ -3222,60 +3329,47 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
if (grace_disallows_io(ino))
return nfserr_grace;
if (nfsd4_has_session(cstate))
flags |= HAS_SESSION;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return check_special_stateids(current_fh, stateid, flags);
status = nfserr_stale_stateid;
if (STALE_STATEID(stateid))
goto out;
/*
* We assume that any stateid that has the current boot time,
* but that we can't find, is expired:
*/
status = nfserr_expired;
if (is_delegation_stateid(stateid)) {
dp = find_delegation_stateid(ino, stateid);
if (!dp)
goto out;
status = check_stateid_generation(stateid, &dp->dl_stateid,
flags);
status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s);
if (status)
return status;
status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
if (status)
goto out;
switch (s->sc_type) {
case NFS4_DELEG_STID:
dp = delegstateid(s);
status = nfs4_check_delegmode(dp, flags);
if (status)
goto out;
renew_client(dp->dl_client);
if (filpp) {
*filpp = dp->dl_file->fi_deleg_file;
BUG_ON(!*filpp);
}
} else { /* open or lock stateid */
stp = find_stateid(stateid, flags);
if (!stp)
goto out;
status = nfserr_bad_stateid;
if (nfs4_check_fh(current_fh, stp))
goto out;
if (!stp->st_stateowner->so_confirmed)
goto out;
status = check_stateid_generation(stateid, &stp->st_stateid,
flags);
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
stp = openlockstateid(s);
status = nfs4_check_fh(current_fh, stp);
if (status)
goto out;
if (stp->st_stateowner->so_is_open_owner
&& !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
goto out;
status = nfs4_check_openmode(stp, flags);
if (status)
goto out;
renew_client(stp->st_stateowner->so_client);
if (filpp) {
if (flags & RD_STATE)
*filpp = find_readable_file(stp->st_file);
else
*filpp = find_writeable_file(stp->st_file);
}
break;
default:
return nfserr_bad_stateid;
}
status = nfs_ok;
out:
......@@ -3283,18 +3377,9 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
}
static __be32
nfsd4_free_delegation_stateid(stateid_t *stateid)
{
struct nfs4_delegation *dp = search_for_delegation(stateid);
if (dp)
return nfserr_locks_held;
return nfserr_bad_stateid;
}
static __be32
nfsd4_free_lock_stateid(struct nfs4_stateid *stp)
nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
{
if (check_for_locks(stp->st_file, stp->st_stateowner))
if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
return nfserr_locks_held;
release_lock_stateid(stp);
return nfs_ok;
......@@ -3307,51 +3392,40 @@ __be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_test_stateid *test_stateid)
{
test_stateid->ts_has_session = nfsd4_has_session(cstate);
/* real work is done during encoding */
return nfs_ok;
}
/*
* Free a state id
*/
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_free_stateid *free_stateid)
{
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stateid *stp;
__be32 ret;
struct nfs4_stid *s;
struct nfs4_client *cl = cstate->session->se_client;
__be32 ret = nfserr_bad_stateid;
nfs4_lock_state();
if (is_delegation_stateid(stateid)) {
ret = nfsd4_free_delegation_stateid(stateid);
goto out;
}
stp = search_for_stateid(stateid);
if (!stp) {
ret = nfserr_bad_stateid;
goto out;
}
if (stateid->si_generation != 0) {
if (stateid->si_generation < stp->st_stateid.si_generation) {
ret = nfserr_old_stateid;
goto out;
}
if (stateid->si_generation > stp->st_stateid.si_generation) {
ret = nfserr_bad_stateid;
s = find_stateid(cl, stateid);
if (!s)
goto out;
}
}
if (is_open_stateid(stp)) {
switch (s->sc_type) {
case NFS4_DELEG_STID:
ret = nfserr_locks_held;
goto out;
} else {
ret = nfsd4_free_lock_stateid(stp);
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
goto out;
if (s->sc_type == NFS4_LOCK_STID)
ret = nfsd4_free_lock_stateid(openlockstateid(s));
else
ret = nfserr_locks_held;
break;
default:
ret = nfserr_bad_stateid;
}
out:
nfs4_unlock_state();
return ret;
......@@ -3364,124 +3438,64 @@ setlkflg (int type)
RD_STATE : WR_STATE;
}
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
struct svc_fh *current_fh = &cstate->current_fh;
struct nfs4_stateowner *sop = stp->st_stateowner;
__be32 status;
status = nfsd4_check_seqid(cstate, sop, seqid);
if (status)
return status;
if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
/*
* "Closed" stateid's exist *only* to return
* nfserr_replay_me from the previous step.
*/
return nfserr_bad_stateid;
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status)
return status;
return nfs4_check_fh(current_fh, stp);
}
/*
* Checks for sequence id mutating operations.
*/
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
stateid_t *stateid, int flags,
struct nfs4_stateowner **sopp,
struct nfs4_stateid **stpp, struct nfsd4_lock *lock)
stateid_t *stateid, char typemask,
struct nfs4_ol_stateid **stpp)
{
struct nfs4_stateid *stp;
struct nfs4_stateowner *sop;
struct svc_fh *current_fh = &cstate->current_fh;
__be32 status;
struct nfs4_stid *s;
dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
seqid, STATEID_VAL(stateid));
*stpp = NULL;
*sopp = NULL;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
dprintk("NFSD: preprocess_seqid_op: magic stateid!\n");
return nfserr_bad_stateid;
}
if (STALE_STATEID(stateid))
return nfserr_stale_stateid;
if (nfsd4_has_session(cstate))
flags |= HAS_SESSION;
/*
* We return BAD_STATEID if filehandle doesn't match stateid,
* the confirmed flag is incorrecly set, or the generation
* number is incorrect.
*/
stp = find_stateid(stateid, flags);
if (stp == NULL) {
/*
* Also, we should make sure this isn't just the result of
* a replayed close:
*/
sop = search_close_lru(stateid->si_stateownerid, flags);
/* It's not stale; let's assume it's expired: */
if (sop == NULL)
return nfserr_expired;
*sopp = sop;
goto check_replay;
}
*stpp = stp;
*sopp = sop = stp->st_stateowner;
if (lock) {
clientid_t *lockclid = &lock->v.new.clientid;
struct nfs4_client *clp = sop->so_client;
int lkflg = 0;
__be32 status;
lkflg = setlkflg(lock->lk_type);
if (lock->lk_is_new) {
if (!sop->so_is_open_owner)
return nfserr_bad_stateid;
if (!(flags & HAS_SESSION) &&
!same_clid(&clp->cl_clientid, lockclid))
return nfserr_bad_stateid;
/* stp is the open stateid */
status = nfs4_check_openmode(stp, lkflg);
status = nfsd4_lookup_stateid(stateid, typemask, &s);
if (status)
return status;
} else {
/* stp is the lock stateid */
status = nfs4_check_openmode(stp->st_openstp, lkflg);
if (status)
return status;
}
}
*stpp = openlockstateid(s);
cstate->replay_owner = (*stpp)->st_stateowner;
if (nfs4_check_fh(current_fh, stp)) {
dprintk("NFSD: preprocess_seqid_op: fh-stateid mismatch!\n");
return nfserr_bad_stateid;
}
return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
}
/*
* We now validate the seqid and stateid generation numbers.
* For the moment, we ignore the possibility of
* generation number wraparound.
*/
if (!(flags & HAS_SESSION) && seqid != sop->so_seqid)
goto check_replay;
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
{
__be32 status;
struct nfs4_openowner *oo;
if (sop->so_confirmed && flags & CONFIRM) {
dprintk("NFSD: preprocess_seqid_op: expected"
" unconfirmed stateowner!\n");
return nfserr_bad_stateid;
}
if (!sop->so_confirmed && !(flags & CONFIRM)) {
dprintk("NFSD: preprocess_seqid_op: stateowner not"
" confirmed yet!\n");
return nfserr_bad_stateid;
}
status = check_stateid_generation(stateid, &stp->st_stateid, flags);
status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
NFS4_OPEN_STID, stpp);
if (status)
return status;
renew_client(sop->so_client);
oo = openowner((*stpp)->st_stateowner);
if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
return nfserr_bad_stateid;
return nfs_ok;
check_replay:
if (seqid == sop->so_seqid - 1) {
dprintk("NFSD: preprocess_seqid_op: retransmission?\n");
/* indicate replay to calling function */
return nfserr_replay_me;
}
dprintk("NFSD: preprocess_seqid_op: bad seqid (expected %d, got %d)\n",
sop->so_seqid, seqid);
*sopp = NULL;
return nfserr_bad_seqid;
}
__be32
......@@ -3489,8 +3503,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_open_confirm *oc)
{
__be32 status;
struct nfs4_stateowner *sop;
struct nfs4_stateid *stp;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
......@@ -3502,38 +3516,52 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
if ((status = nfs4_preprocess_seqid_op(cstate,
status = nfs4_preprocess_seqid_op(cstate,
oc->oc_seqid, &oc->oc_req_stateid,
CONFIRM | OPEN_STATE,
&oc->oc_stateowner, &stp, NULL)))
NFS4_OPEN_STID, &stp);
if (status)
goto out;
sop = oc->oc_stateowner;
sop->so_confirmed = 1;
update_stateid(&stp->st_stateid);
memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t));
oo = openowner(stp->st_stateowner);
status = nfserr_bad_stateid;
if (oo->oo_flags & NFS4_OO_CONFIRMED)
goto out;
oo->oo_flags |= NFS4_OO_CONFIRMED;
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stateid));
__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
nfsd4_create_clid_dir(sop->so_client);
nfsd4_create_clid_dir(oo->oo_owner.so_client);
status = nfs_ok;
out:
if (oc->oc_stateowner) {
nfs4_get_stateowner(oc->oc_stateowner);
cstate->replay_owner = oc->oc_stateowner;
}
if (!cstate->replay_owner)
nfs4_unlock_state();
return status;
}
static inline void nfs4_file_downgrade(struct nfs4_stateid *stp, unsigned int to_access)
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
int i;
if (!test_bit(access, &stp->st_access_bmap))
return;
nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
__clear_bit(access, &stp->st_access_bmap);
}
for (i = 1; i < 4; i++) {
if (test_bit(i, &stp->st_access_bmap) && !(i & to_access)) {
nfs4_file_put_access(stp->st_file, i);
__clear_bit(i, &stp->st_access_bmap);
}
static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
switch (to_access) {
case NFS4_SHARE_ACCESS_READ:
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
break;
case NFS4_SHARE_ACCESS_WRITE:
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
break;
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
BUG();
}
}
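/*
 * Illustrative userspace sketch (not kernel code): OPEN_DOWNGRADE to READ must
 * drop both the WRITE and the BOTH bits from st_access_bmap (and symmetrically
 * for a downgrade to WRITE); downgrading to BOTH drops nothing.  The real
 * nfs4_stateid_downgrade_bit() also releases the matching nfs4_file access
 * reference, which this model omits.  Share-access values double as bit
 * numbers, as in the real bitmap.
 */
#include <stdio.h>

#define DEMO_ACCESS_READ  1
#define DEMO_ACCESS_WRITE 2
#define DEMO_ACCESS_BOTH  3

static void demo_downgrade_bit(unsigned long *bmap, int access)
{
	*bmap &= ~(1UL << access);
}

static void demo_downgrade(unsigned long *bmap, int to_access)
{
	switch (to_access) {
	case DEMO_ACCESS_READ:
		demo_downgrade_bit(bmap, DEMO_ACCESS_WRITE);
		demo_downgrade_bit(bmap, DEMO_ACCESS_BOTH);
		break;
	case DEMO_ACCESS_WRITE:
		demo_downgrade_bit(bmap, DEMO_ACCESS_READ);
		demo_downgrade_bit(bmap, DEMO_ACCESS_BOTH);
		break;
	case DEMO_ACCESS_BOTH:
		break;
	}
}

int main(void)
{
	/* a stateid opened for READ and later upgraded to BOTH */
	unsigned long bmap = (1UL << DEMO_ACCESS_READ) | (1UL << DEMO_ACCESS_BOTH);

	demo_downgrade(&bmap, DEMO_ACCESS_READ);
	printf("0x%lx\n", bmap);	/* 0x2: only the READ bit is left */
	return 0;
}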
......@@ -3553,24 +3581,20 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfsd4_open_downgrade *od)
{
__be32 status;
struct nfs4_stateid *stp;
struct nfs4_ol_stateid *stp;
dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
cstate->current_fh.fh_dentry->d_name.name);
if (!access_valid(od->od_share_access, cstate->minorversion)
|| !deny_valid(od->od_share_deny))
return nfserr_inval;
/* We don't yet support WANT bits: */
od->od_share_access &= NFS4_SHARE_ACCESS_MASK;
nfs4_lock_state();
if ((status = nfs4_preprocess_seqid_op(cstate,
od->od_seqid,
&od->od_stateid,
OPEN_STATE,
&od->od_stateowner, &stp, NULL)))
status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
&od->od_stateid, &stp);
if (status)
goto out;
status = nfserr_inval;
if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
......@@ -3582,22 +3606,45 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
stp->st_deny_bmap, od->od_share_deny);
goto out;
}
nfs4_file_downgrade(stp, od->od_share_access);
nfs4_stateid_downgrade(stp, od->od_share_access);
reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
update_stateid(&stp->st_stateid);
memcpy(&od->od_stateid, &stp->st_stateid, sizeof(stateid_t));
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
status = nfs_ok;
out:
if (od->od_stateowner) {
nfs4_get_stateowner(od->od_stateowner);
cstate->replay_owner = od->od_stateowner;
}
if (!cstate->replay_owner)
nfs4_unlock_state();
return status;
}
void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
{
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *s;
if (!so->so_is_open_owner)
return;
oo = openowner(so);
s = oo->oo_last_closed_stid;
if (!s)
return;
if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
/* Release the last_closed_stid on the next seqid bump: */
oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
return;
}
oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
release_last_closed_stateid(oo);
}
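/*
 * Illustrative userspace sketch (not kernel code): the last closed stateid is
 * kept so a replayed CLOSE can still be answered, and
 * nfsd4_purge_closed_stateid() only frees it the second time it runs for the
 * owner -- the first call just arms NFS4_OO_PURGE_CLOSE, the next one does the
 * release.  The model below reproduces that two-step purge.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_owner {
	void *last_closed;	/* stands in for oo_last_closed_stid */
	int   purge_armed;	/* stands in for NFS4_OO_PURGE_CLOSE */
};

static void demo_purge_closed(struct demo_owner *oo)
{
	if (!oo->last_closed)
		return;
	if (!oo->purge_armed) {
		oo->purge_armed = 1;	/* first pass: keep it for replay */
		return;
	}
	oo->purge_armed = 0;
	free(oo->last_closed);		/* second pass: actually release  */
	oo->last_closed = NULL;
}

int main(void)
{
	struct demo_owner oo = { malloc(32), 0 };

	demo_purge_closed(&oo);
	printf("after first purge call: %s\n", oo.last_closed ? "kept" : "gone");
	demo_purge_closed(&oo);
	printf("after second purge call: %s\n", oo.last_closed ? "kept" : "gone");
	return 0;
}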
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
unhash_open_stateid(s);
s->st_stid.sc_type = NFS4_CLOSED_STID;
}
/*
* nfs4_unlock_state() called after encode
*/
......@@ -3606,38 +3653,36 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_close *close)
{
__be32 status;
struct nfs4_stateid *stp;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
dprintk("NFSD: nfsd4_close on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
cstate->current_fh.fh_dentry->d_name.name);
nfs4_lock_state();
/* check close_lru for replay */
if ((status = nfs4_preprocess_seqid_op(cstate,
close->cl_seqid,
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
&close->cl_stateid,
OPEN_STATE | CLOSE_STATE,
&close->cl_stateowner, &stp, NULL)))
NFS4_OPEN_STID|NFS4_CLOSED_STID,
&stp);
if (status)
goto out;
oo = openowner(stp->st_stateowner);
status = nfs_ok;
update_stateid(&stp->st_stateid);
memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t));
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
/* release_stateid() calls nfsd_close() if needed */
release_open_stateid(stp);
nfsd4_close_open_stateid(stp);
oo->oo_last_closed_stid = stp;
/* place unused nfs4_openowners on the close_lru list to be
* released by the laundromat service after the lease period
* to enable us to handle CLOSE replay
*/
if (list_empty(&close->cl_stateowner->so_stateids))
move_to_close_lru(close->cl_stateowner);
if (list_empty(&oo->oo_owner.so_stateids))
move_to_close_lru(oo);
out:
if (close->cl_stateowner) {
nfs4_get_stateowner(close->cl_stateowner);
cstate->replay_owner = close->cl_stateowner;
}
if (!cstate->replay_owner)
nfs4_unlock_state();
return status;
}
......@@ -3648,34 +3693,22 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfs4_delegation *dp;
stateid_t *stateid = &dr->dr_stateid;
struct nfs4_stid *s;
struct inode *inode;
__be32 status;
int flags = 0;
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
return status;
inode = cstate->current_fh.fh_dentry->d_inode;
if (nfsd4_has_session(cstate))
flags |= HAS_SESSION;
nfs4_lock_state();
status = nfserr_bad_stateid;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
goto out;
status = nfserr_stale_stateid;
if (STALE_STATEID(stateid))
goto out;
status = nfserr_bad_stateid;
if (!is_delegation_stateid(stateid))
goto out;
status = nfserr_expired;
dp = find_delegation_stateid(inode, stateid);
if (!dp)
status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s);
if (status)
goto out;
status = check_stateid_generation(stateid, &dp->dl_stateid, flags);
dp = delegstateid(s);
status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
if (status)
goto out;
renew_client(dp->dl_client);
unhash_delegation(dp);
out:
......@@ -3713,9 +3746,6 @@ last_byte_offset(u64 start, u64 len)
return end > start ? end - 1: NFS4_MAX_UINT64;
}
#define lockownerid_hashval(id) \
((id) & LOCK_HASH_MASK)
static inline unsigned int
lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
struct xdr_netobj *ownername)
......@@ -3725,101 +3755,7 @@ lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
& LOCK_HASH_MASK;
}
static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE];
static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
static struct list_head lockstateid_hashtbl[STATEID_HASH_SIZE];
static int
same_stateid(stateid_t *id_one, stateid_t *id_two)
{
if (id_one->si_stateownerid != id_two->si_stateownerid)
return 0;
return id_one->si_fileid == id_two->si_fileid;
}
static struct nfs4_stateid *
find_stateid(stateid_t *stid, int flags)
{
struct nfs4_stateid *local;
u32 st_id = stid->si_stateownerid;
u32 f_id = stid->si_fileid;
unsigned int hashval;
dprintk("NFSD: find_stateid flags 0x%x\n",flags);
if (flags & (LOCK_STATE | RD_STATE | WR_STATE)) {
hashval = stateid_hashval(st_id, f_id);
list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) {
if ((local->st_stateid.si_stateownerid == st_id) &&
(local->st_stateid.si_fileid == f_id))
return local;
}
}
if (flags & (OPEN_STATE | RD_STATE | WR_STATE)) {
hashval = stateid_hashval(st_id, f_id);
list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) {
if ((local->st_stateid.si_stateownerid == st_id) &&
(local->st_stateid.si_fileid == f_id))
return local;
}
}
return NULL;
}
static struct nfs4_stateid *
search_for_stateid(stateid_t *stid)
{
struct nfs4_stateid *local;
unsigned int hashval = stateid_hashval(stid->si_stateownerid, stid->si_fileid);
list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) {
if (same_stateid(&local->st_stateid, stid))
return local;
}
list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) {
if (same_stateid(&local->st_stateid, stid))
return local;
}
return NULL;
}
static struct nfs4_delegation *
search_for_delegation(stateid_t *stid)
{
struct nfs4_file *fp;
struct nfs4_delegation *dp;
struct list_head *pos;
int i;
for (i = 0; i < FILE_HASH_SIZE; i++) {
list_for_each_entry(fp, &file_hashtbl[i], fi_hash) {
list_for_each(pos, &fp->fi_delegations) {
dp = list_entry(pos, struct nfs4_delegation, dl_perfile);
if (same_stateid(&dp->dl_stateid, stid))
return dp;
}
}
}
return NULL;
}
static struct nfs4_delegation *
find_delegation_stateid(struct inode *ino, stateid_t *stid)
{
struct nfs4_file *fp;
struct nfs4_delegation *dl;
dprintk("NFSD: %s: stateid=" STATEID_FMT "\n", __func__,
STATEID_VAL(stid));
fp = find_file(ino);
if (!fp)
return NULL;
dl = find_delegation_file(fp, stid);
put_nfs4_file(fp);
return dl;
}
/*
* TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
......@@ -3846,15 +3782,21 @@ static const struct lock_manager_operations nfsd_posix_mng_ops = {
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
struct nfs4_stateowner *sop;
struct nfs4_lockowner *lo;
if (fl->fl_lmops == &nfsd_posix_mng_ops) {
sop = (struct nfs4_stateowner *) fl->fl_owner;
kref_get(&sop->so_ref);
deny->ld_sop = sop;
deny->ld_clientid = sop->so_client->cl_clientid;
lo = (struct nfs4_lockowner *) fl->fl_owner;
deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
lo->lo_owner.so_owner.len, GFP_KERNEL);
if (!deny->ld_owner.data)
/* We just don't care that much */
goto nevermind;
deny->ld_owner.len = lo->lo_owner.so_owner.len;
deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
} else {
deny->ld_sop = NULL;
nevermind:
deny->ld_owner.len = 0;
deny->ld_owner.data = NULL;
deny->ld_clientid.cl_boot = 0;
deny->ld_clientid.cl_id = 0;
}
......@@ -3867,8 +3809,8 @@ nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
deny->ld_type = NFS4_WRITE_LT;
}
static struct nfs4_stateowner *
find_lockstateowner_str(struct inode *inode, clientid_t *clid,
static struct nfs4_lockowner *
find_lockowner_str(struct inode *inode, clientid_t *clid,
struct xdr_netobj *owner)
{
unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
......@@ -3876,11 +3818,17 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid,
list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
if (same_owner_str(op, owner, clid))
return op;
return lockowner(op);
}
return NULL;
}
static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
{
list_add(&lo->lo_owner.so_strhash, &lock_ownerstr_hashtbl[strhashval]);
list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
}
/*
* Alloc a lock owner structure.
* Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
......@@ -3889,67 +3837,40 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid,
* strhashval = lock_ownerstr_hashval
*/
static struct nfs4_stateowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_stateid *open_stp, struct nfsd4_lock *lock) {
struct nfs4_stateowner *sop;
struct nfs4_replay *rp;
unsigned int idhashval;
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
struct nfs4_lockowner *lo;
if (!(sop = alloc_stateowner(&lock->lk_new_owner)))
lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
if (!lo)
return NULL;
idhashval = lockownerid_hashval(current_ownerid);
INIT_LIST_HEAD(&sop->so_idhash);
INIT_LIST_HEAD(&sop->so_strhash);
INIT_LIST_HEAD(&sop->so_perclient);
INIT_LIST_HEAD(&sop->so_stateids);
INIT_LIST_HEAD(&sop->so_perstateid);
INIT_LIST_HEAD(&sop->so_close_lru); /* not used */
sop->so_time = 0;
list_add(&sop->so_idhash, &lock_ownerid_hashtbl[idhashval]);
list_add(&sop->so_strhash, &lock_ownerstr_hashtbl[strhashval]);
list_add(&sop->so_perstateid, &open_stp->st_lockowners);
sop->so_is_open_owner = 0;
sop->so_id = current_ownerid++;
sop->so_client = clp;
INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
lo->lo_owner.so_is_open_owner = 0;
/* It is the openowner seqid that will be incremented in encode in the
* case of new lockowners; so increment the lock seqid manually: */
sop->so_seqid = lock->lk_new_lock_seqid + 1;
sop->so_confirmed = 1;
rp = &sop->so_replay;
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
return sop;
lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
hash_lockowner(lo, strhashval, clp, open_stp);
return lo;
}
static struct nfs4_stateid *
alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struct nfs4_stateid *open_stp)
static struct nfs4_ol_stateid *
alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
{
struct nfs4_stateid *stp;
unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
struct nfs4_ol_stateid *stp;
struct nfs4_client *clp = lo->lo_owner.so_client;
stp = nfs4_alloc_stateid();
stp = nfs4_alloc_stateid(clp);
if (stp == NULL)
goto out;
INIT_LIST_HEAD(&stp->st_hash);
INIT_LIST_HEAD(&stp->st_perfile);
INIT_LIST_HEAD(&stp->st_perstateowner);
INIT_LIST_HEAD(&stp->st_lockowners); /* not used */
list_add(&stp->st_hash, &lockstateid_hashtbl[hashval]);
return NULL;
init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
list_add(&stp->st_perfile, &fp->fi_stateids);
list_add(&stp->st_perstateowner, &sop->so_stateids);
stp->st_stateowner = sop;
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
stp->st_stateowner = &lo->lo_owner;
get_nfs4_file(fp);
stp->st_file = fp;
stp->st_stateid.si_boot = boot_time;
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
out:
return stp;
}
......@@ -3960,7 +3881,7 @@ check_lock_length(u64 offset, u64 length)
LOFF_OVERFLOW(offset, length)));
}
static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access)
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
struct nfs4_file *fp = lock_stp->st_file;
int oflag = nfs4_access_to_omode(access);
......@@ -3978,15 +3899,16 @@ __be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_lock *lock)
{
struct nfs4_stateowner *open_sop = NULL;
struct nfs4_stateowner *lock_sop = NULL;
struct nfs4_stateid *lock_stp;
struct nfs4_openowner *open_sop = NULL;
struct nfs4_lockowner *lock_sop = NULL;
struct nfs4_ol_stateid *lock_stp;
struct nfs4_file *fp;
struct file *filp = NULL;
struct file_lock file_lock;
struct file_lock conflock;
__be32 status = 0;
unsigned int strhashval;
int lkflg;
int err;
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
......@@ -4010,7 +3932,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Use open owner and open stateid to create lock owner and
* lock stateid.
*/
struct nfs4_stateid *open_stp = NULL;
struct nfs4_ol_stateid *open_stp = NULL;
status = nfserr_stale_clientid;
if (!nfsd4_has_session(cstate) &&
......@@ -4018,26 +3940,29 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
/* validate and update open stateid and open seqid */
status = nfs4_preprocess_seqid_op(cstate,
status = nfs4_preprocess_confirmed_seqid_op(cstate,
lock->lk_new_open_seqid,
&lock->lk_new_open_stateid,
OPEN_STATE,
&lock->lk_replay_owner, &open_stp,
lock);
&open_stp);
if (status)
goto out;
open_sop = lock->lk_replay_owner;
open_sop = openowner(open_stp->st_stateowner);
status = nfserr_bad_stateid;
if (!nfsd4_has_session(cstate) &&
!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
&lock->v.new.clientid))
goto out;
/* create lockowner and lock stateid */
fp = open_stp->st_file;
strhashval = lock_ownerstr_hashval(fp->fi_inode,
open_sop->so_client->cl_clientid.cl_id,
open_sop->oo_owner.so_client->cl_clientid.cl_id,
&lock->v.new.owner);
/* XXX: Do we need to check for duplicate stateowners on
* the same file, or should they just be allowed (and
* create new stateids)? */
status = nfserr_resource;
status = nfserr_jukebox;
lock_sop = alloc_init_lock_stateowner(strhashval,
open_sop->so_client, open_stp, lock);
open_sop->oo_owner.so_client, open_stp, lock);
if (lock_sop == NULL)
goto out;
lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
......@@ -4048,14 +3973,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
&lock->lk_old_lock_stateid,
LOCK_STATE,
&lock->lk_replay_owner, &lock_stp, lock);
NFS4_LOCK_STID, &lock_stp);
if (status)
goto out;
lock_sop = lock->lk_replay_owner;
lock_sop = lockowner(lock_stp->st_stateowner);
fp = lock_stp->st_file;
}
/* lock->lk_replay_owner and lock_stp have been created or found */
/* lock_sop and lock_stp have been created or found */
lkflg = setlkflg(lock->lk_type);
status = nfs4_check_openmode(lock_stp, lkflg);
if (status)
goto out;
status = nfserr_grace;
if (locks_in_grace() && !lock->lk_reclaim)
......@@ -4106,8 +4035,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
switch (-err) {
case 0: /* success! */
update_stateid(&lock_stp->st_stateid);
memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
update_stateid(&lock_stp->st_stid.sc_stateid);
memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
sizeof(stateid_t));
status = 0;
break;
......@@ -4121,16 +4050,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
break;
default:
dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
status = nfserr_resource;
status = nfserrno(err);
break;
}
out:
if (status && lock->lk_is_new && lock_sop)
release_lockowner(lock_sop);
if (lock->lk_replay_owner) {
nfs4_get_stateowner(lock->lk_replay_owner);
cstate->replay_owner = lock->lk_replay_owner;
}
if (!cstate->replay_owner)
nfs4_unlock_state();
return status;
}
......@@ -4163,6 +4089,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct inode *inode;
struct file_lock file_lock;
struct nfs4_lockowner *lo;
int error;
__be32 status;
......@@ -4172,19 +4099,14 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (check_lock_length(lockt->lt_offset, lockt->lt_length))
return nfserr_inval;
lockt->lt_stateowner = NULL;
nfs4_lock_state();
status = nfserr_stale_clientid;
if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
goto out;
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) {
dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n");
if (status == nfserr_symlink)
status = nfserr_inval;
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
goto out;
}
inode = cstate->current_fh.fh_dentry->d_inode;
locks_init_lock(&file_lock);
......@@ -4203,10 +4125,9 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
lockt->lt_stateowner = find_lockstateowner_str(inode,
&lockt->lt_clientid, &lockt->lt_owner);
if (lockt->lt_stateowner)
file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner;
lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
if (lo)
file_lock.fl_owner = (fl_owner_t)lo;
file_lock.fl_pid = current->tgid;
file_lock.fl_flags = FL_POSIX;
......@@ -4234,7 +4155,7 @@ __be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_locku *locku)
{
struct nfs4_stateid *stp;
struct nfs4_ol_stateid *stp;
struct file *filp = NULL;
struct file_lock file_lock;
__be32 status;
......@@ -4249,13 +4170,10 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
if ((status = nfs4_preprocess_seqid_op(cstate,
locku->lu_seqid,
&locku->lu_stateid,
LOCK_STATE,
&locku->lu_stateowner, &stp, NULL)))
status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
&locku->lu_stateid, NFS4_LOCK_STID, &stp);
if (status)
goto out;
filp = find_any_file(stp->st_file);
if (!filp) {
status = nfserr_lock_range;
......@@ -4264,7 +4182,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
BUG_ON(!filp);
locks_init_lock(&file_lock);
file_lock.fl_type = F_UNLCK;
file_lock.fl_owner = (fl_owner_t) locku->lu_stateowner;
file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
file_lock.fl_pid = current->tgid;
file_lock.fl_file = filp;
file_lock.fl_flags = FL_POSIX;
......@@ -4285,14 +4203,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/*
* OK, unlock succeeded; the only thing left to do is update the stateid.
*/
update_stateid(&stp->st_stateid);
memcpy(&locku->lu_stateid, &stp->st_stateid, sizeof(stateid_t));
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
out:
if (locku->lu_stateowner) {
nfs4_get_stateowner(locku->lu_stateowner);
cstate->replay_owner = locku->lu_stateowner;
}
if (!cstate->replay_owner)
nfs4_unlock_state();
return status;
......@@ -4307,7 +4222,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* 0: no locks held by lockowner
*/
static int
check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner)
check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
{
struct file_lock **flpp;
struct inode *inode = filp->fi_inode;
......@@ -4332,7 +4247,8 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
{
clientid_t *clid = &rlockowner->rl_clientid;
struct nfs4_stateowner *sop;
struct nfs4_stateid *stp;
struct nfs4_lockowner *lo;
struct nfs4_ol_stateid *stp;
struct xdr_netobj *owner = &rlockowner->rl_owner;
struct list_head matches;
int i;
......@@ -4356,16 +4272,15 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
* data structures. */
INIT_LIST_HEAD(&matches);
for (i = 0; i < LOCK_HASH_SIZE; i++) {
list_for_each_entry(sop, &lock_ownerid_hashtbl[i], so_idhash) {
list_for_each_entry(sop, &lock_ownerstr_hashtbl[i], so_strhash) {
if (!same_owner_str(sop, owner, clid))
continue;
list_for_each_entry(stp, &sop->so_stateids,
st_perstateowner) {
if (check_for_locks(stp->st_file, sop))
lo = lockowner(sop);
if (check_for_locks(stp->st_file, lo))
goto out;
/* Note: so_perclient unused for lockowners,
* so it's OK to fool with here. */
list_add(&sop->so_perclient, &matches);
list_add(&lo->lo_list, &matches);
}
}
}
......@@ -4374,12 +4289,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
* have been checked. */
status = nfs_ok;
while (!list_empty(&matches)) {
sop = list_entry(matches.next, struct nfs4_stateowner,
so_perclient);
lo = list_entry(matches.next, struct nfs4_lockowner,
lo_list);
/* unhash_stateowner deletes so_perclient only
* for openowners. */
list_del(&sop->so_perclient);
release_lockowner(sop);
list_del(&lo->lo_list);
release_lockowner(lo);
}
out:
nfs4_unlock_state();
......@@ -4501,16 +4416,10 @@ nfs4_state_init(void)
for (i = 0; i < FILE_HASH_SIZE; i++) {
INIT_LIST_HEAD(&file_hashtbl[i]);
}
for (i = 0; i < OWNER_HASH_SIZE; i++) {
INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
INIT_LIST_HEAD(&ownerid_hashtbl[i]);
}
for (i = 0; i < STATEID_HASH_SIZE; i++) {
INIT_LIST_HEAD(&stateid_hashtbl[i]);
INIT_LIST_HEAD(&lockstateid_hashtbl[i]);
for (i = 0; i < OPEN_OWNER_HASH_SIZE; i++) {
INIT_LIST_HEAD(&open_ownerstr_hashtbl[i]);
}
for (i = 0; i < LOCK_HASH_SIZE; i++) {
INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]);
INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
}
memset(&onestateid, ~0, sizeof(stateid_t));
......@@ -4527,7 +4436,7 @@ nfsd4_load_reboot_recovery_data(void)
int status;
nfs4_lock_state();
nfsd4_init_recdir(user_recovery_dirname);
nfsd4_init_recdir();
status = nfsd4_recdir_load();
nfs4_unlock_state();
if (status)
......@@ -4636,40 +4545,3 @@ nfs4_state_shutdown(void)
nfs4_unlock_state();
nfsd4_destroy_callback_queue();
}
/*
* user_recovery_dirname is protected by the nfsd_mutex since it's only
* accessed when nfsd is starting.
*/
static void
nfs4_set_recdir(char *recdir)
{
strcpy(user_recovery_dirname, recdir);
}
/*
* Change the NFSv4 recovery directory to recdir.
*/
int
nfs4_reset_recoverydir(char *recdir)
{
int status;
struct path path;
status = kern_path(recdir, LOOKUP_FOLLOW, &path);
if (status)
return status;
status = -ENOTDIR;
if (S_ISDIR(path.dentry->d_inode->i_mode)) {
nfs4_set_recdir(recdir);
status = 0;
}
path_put(&path);
return status;
}
char *
nfs4_recoverydir(void)
{
return user_recovery_dirname;
}
......@@ -456,7 +456,6 @@ nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
{
DECODE_HEAD;
close->cl_stateowner = NULL;
READ_BUF(4);
READ32(close->cl_seqid);
return nfsd4_decode_stateid(argp, &close->cl_stateid);
......@@ -551,7 +550,6 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
DECODE_HEAD;
lock->lk_replay_owner = NULL;
/*
* type, reclaim(boolean), offset, length, new_lock_owner(boolean)
*/
......@@ -611,7 +609,6 @@ nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
{
DECODE_HEAD;
locku->lu_stateowner = NULL;
READ_BUF(8);
READ32(locku->lu_type);
if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
......@@ -642,6 +639,83 @@ nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup
DECODE_TAIL;
}
static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x)
{
__be32 *p;
u32 w;
READ_BUF(4);
READ32(w);
*x = w;
switch (w & NFS4_SHARE_ACCESS_MASK) {
case NFS4_SHARE_ACCESS_READ:
case NFS4_SHARE_ACCESS_WRITE:
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_ACCESS_MASK;
if (!w)
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
switch (w & NFS4_SHARE_WANT_MASK) {
case NFS4_SHARE_WANT_NO_PREFERENCE:
case NFS4_SHARE_WANT_READ_DELEG:
case NFS4_SHARE_WANT_WRITE_DELEG:
case NFS4_SHARE_WANT_ANY_DELEG:
case NFS4_SHARE_WANT_NO_DELEG:
case NFS4_SHARE_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_WANT_MASK;
if (!w)
return nfs_ok;
switch (w) {
case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
case (NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL |
NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED):
return nfs_ok;
}
xdr_error:
return nfserr_bad_xdr;
}
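/*
 * Illustrative sketch (hypothetical helper, not part of the patch): how a
 * combined share_access word from the wire splits into the three groups
 * validated above.
 */
static inline void example_split_share_access(u32 w, u32 *access, u32 *want,
					      u32 *signals)
{
	*access  = w & NFS4_SHARE_ACCESS_MASK;	/* READ, WRITE or BOTH */
	*want    = w & NFS4_SHARE_WANT_MASK;	/* 4.1 WANT_* preference */
	*signals = w & ~(NFS4_SHARE_ACCESS_MASK | NFS4_SHARE_WANT_MASK);
						/* SIGNAL/PUSH deleg hints */
}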
static __be32 nfsd4_decode_share_deny(struct nfsd4_compoundargs *argp, u32 *x)
{
__be32 *p;
READ_BUF(4);
READ32(*x);
/* Note: unlike access bits, deny bits may be zero. */
if (*x & ~NFS4_SHARE_DENY_BOTH)
return nfserr_bad_xdr;
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32 nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
{
__be32 *p;
READ_BUF(4);
READ32(o->len);
if (o->len == 0 || o->len > NFS4_OPAQUE_LIMIT)
return nfserr_bad_xdr;
READ_BUF(o->len);
SAVEMEM(o->data, o->len);
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
......@@ -649,19 +723,23 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0;
open->op_stateowner = NULL;
open->op_openowner = NULL;
/* seqid, share_access, share_deny, clientid, ownerlen */
READ_BUF(16 + sizeof(clientid_t));
READ_BUF(4);
READ32(open->op_seqid);
READ32(open->op_share_access);
READ32(open->op_share_deny);
status = nfsd4_decode_share_access(argp, &open->op_share_access);
if (status)
goto xdr_error;
status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
if (status)
goto xdr_error;
READ_BUF(sizeof(clientid_t));
COPYMEM(&open->op_clientid, sizeof(clientid_t));
READ32(open->op_owner.len);
/* owner, open_flag */
READ_BUF(open->op_owner.len + 4);
SAVEMEM(open->op_owner.data, open->op_owner.len);
status = nfsd4_decode_opaque(argp, &open->op_owner);
if (status)
goto xdr_error;
READ_BUF(4);
READ32(open->op_create);
switch (open->op_create) {
case NFS4_OPEN_NOCREATE:
......@@ -727,6 +805,19 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
return status;
break;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
if (argp->minorversion < 1)
goto xdr_error;
/* void */
break;
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
if (argp->minorversion < 1)
goto xdr_error;
status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
if (status)
return status;
break;
default:
goto xdr_error;
}
......@@ -739,7 +830,6 @@ nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_con
{
DECODE_HEAD;
open_conf->oc_stateowner = NULL;
status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
if (status)
return status;
......@@ -754,15 +844,17 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
{
DECODE_HEAD;
open_down->od_stateowner = NULL;
status = nfsd4_decode_stateid(argp, &open_down->od_stateid);
if (status)
return status;
READ_BUF(12);
READ_BUF(4);
READ32(open_down->od_seqid);
READ32(open_down->od_share_access);
READ32(open_down->od_share_deny);
status = nfsd4_decode_share_access(argp, &open_down->od_share_access);
if (status)
return status;
status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
if (status)
return status;
DECODE_TAIL;
}
......@@ -903,12 +995,13 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
{
DECODE_HEAD;
READ_BUF(12);
READ_BUF(8);
COPYMEM(setclientid->se_verf.data, 8);
READ32(setclientid->se_namelen);
READ_BUF(setclientid->se_namelen + 8);
SAVEMEM(setclientid->se_name, setclientid->se_namelen);
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
return nfserr_bad_xdr;
READ_BUF(8);
READ32(setclientid->se_callback_prog);
READ32(setclientid->se_callback_netid_len);
......@@ -1051,11 +1144,9 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(exid->verifier.data, NFS4_VERIFIER_SIZE);
READ_BUF(4);
READ32(exid->clname.len);
READ_BUF(exid->clname.len);
SAVEMEM(exid->clname.data, exid->clname.len);
status = nfsd4_decode_opaque(argp, &exid->clname);
if (status)
return nfserr_bad_xdr;
READ_BUF(4);
READ32(exid->flags);
......@@ -1326,6 +1417,16 @@ nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_sta
goto out;
}
static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, struct nfsd4_destroy_clientid *dc)
{
DECODE_HEAD;
READ_BUF(8);
COPYMEM(&dc->clientid, 8);
DECODE_TAIL;
}
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
{
DECODE_HEAD;
......@@ -1447,7 +1548,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_destroy_clientid,
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
};
......@@ -1630,15 +1731,20 @@ static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
* we know whether the error to be returned is a sequence id mutating error.
*/
#define ENCODE_SEQID_OP_TAIL(stateowner) do { \
if (seqid_mutating_err(nfserr) && stateowner) { \
stateowner->so_seqid++; \
stateowner->so_replay.rp_status = nfserr; \
stateowner->so_replay.rp_buflen = \
(((char *)(resp)->p - (char *)save)); \
memcpy(stateowner->so_replay.rp_buf, save, \
stateowner->so_replay.rp_buflen); \
} } while (0);
static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, __be32 nfserr)
{
struct nfs4_stateowner *stateowner = resp->cstate.replay_owner;
if (seqid_mutating_err(ntohl(nfserr)) && stateowner) {
stateowner->so_seqid++;
stateowner->so_replay.rp_status = nfserr;
stateowner->so_replay.rp_buflen =
(char *)resp->p - (char *)save;
memcpy(stateowner->so_replay.rp_buf, save,
stateowner->so_replay.rp_buflen);
nfsd4_purge_closed_stateid(stateowner);
}
}
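/*
 * Illustrative sketch (hypothetical, not part of the patch): a replay of the
 * cached reply recorded above could later be re-emitted roughly like this,
 * using the same encode macros as the rest of this file.
 */
static inline void example_emit_cached_reply(struct nfsd4_compoundres *resp,
					     struct nfs4_replay *rp)
{
	__be32 *p;

	RESERVE_SPACE(rp->rp_buflen);
	WRITEMEM(rp->rp_buf, rp->rp_buflen);
	ADJUST_ARGS();
}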
/* Encode as an array of strings the string given with components
* separated @sep.
......@@ -1697,36 +1803,89 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
}
/*
* Return the path to an export point in the pseudo filesystem namespace
* Returned string is safe to use as long as the caller holds a reference
* to @exp.
* Encode a path in RFC3530 'pathname4' format
*/
static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *stat)
static __be32 nfsd4_encode_path(const struct path *root,
const struct path *path, __be32 **pp, int *buflen)
{
struct svc_fh tmp_fh;
char *path = NULL, *rootpath;
size_t rootlen;
fh_init(&tmp_fh, NFS4_FHSIZE);
*stat = exp_pseudoroot(rqstp, &tmp_fh);
if (*stat)
return NULL;
rootpath = tmp_fh.fh_export->ex_pathname;
struct path cur = {
.mnt = path->mnt,
.dentry = path->dentry,
};
__be32 *p = *pp;
struct dentry **components = NULL;
unsigned int ncomponents = 0;
__be32 err = nfserr_jukebox;
path = exp->ex_pathname;
dprintk("nfsd4_encode_components(");
rootlen = strlen(rootpath);
if (strncmp(path, rootpath, rootlen)) {
dprintk("nfsd: fs_locations failed;"
"%s is not contained in %s\n", path, rootpath);
*stat = nfserr_notsupp;
path = NULL;
goto out;
path_get(&cur);
/* First walk the path up to the nfsd root, and store the
* dentries/path components in an array.
*/
for (;;) {
if (cur.dentry == root->dentry && cur.mnt == root->mnt)
break;
if (cur.dentry == cur.mnt->mnt_root) {
if (follow_up(&cur))
continue;
goto out_free;
}
if ((ncomponents & 15) == 0) {
struct dentry **new;
new = krealloc(components,
sizeof(*new) * (ncomponents + 16),
GFP_KERNEL);
if (!new)
goto out_free;
components = new;
}
components[ncomponents++] = cur.dentry;
cur.dentry = dget_parent(cur.dentry);
}
*buflen -= 4;
if (*buflen < 0)
goto out_free;
WRITE32(ncomponents);
while (ncomponents) {
struct dentry *dentry = components[ncomponents - 1];
unsigned int len = dentry->d_name.len;
*buflen -= 4 + (XDR_QUADLEN(len) << 2);
if (*buflen < 0)
goto out_free;
WRITE32(len);
WRITEMEM(dentry->d_name.name, len);
dprintk("/%s", dentry->d_name.name);
dput(dentry);
ncomponents--;
}
path += rootlen;
out:
fh_put(&tmp_fh);
return path;
*pp = p;
err = 0;
out_free:
dprintk(")\n");
while (ncomponents)
dput(components[--ncomponents]);
kfree(components);
path_put(&cur);
return err;
}
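/*
 * Example of the resulting encoding (editorial note): for an export two
 * components below the pseudo-root, say exports/data, the pathname4 put on
 * the wire by the loop above is a component count followed by
 * length-prefixed names -- 2, "exports", "data" -- each name padded to a
 * 4-byte XDR boundary.
 */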
static __be32 nfsd4_encode_fsloc_fsroot(struct svc_rqst *rqstp,
const struct path *path, __be32 **pp, int *buflen)
{
struct svc_export *exp_ps;
__be32 res;
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
res = nfsd4_encode_path(&exp_ps->ex_path, path, pp, buflen);
exp_put(exp_ps);
return res;
}
/*
......@@ -1740,11 +1899,8 @@ static __be32 nfsd4_encode_fs_locations(struct svc_rqst *rqstp,
int i;
__be32 *p = *pp;
struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
char *root = nfsd4_path(rqstp, exp, &status);
if (status)
return status;
status = nfsd4_encode_components('/', root, &p, buflen);
status = nfsd4_encode_fsloc_fsroot(rqstp, &exp->ex_path, &p, buflen);
if (status)
return status;
if ((*buflen -= 4) < 0)
......@@ -1760,12 +1916,19 @@ static __be32 nfsd4_encode_fs_locations(struct svc_rqst *rqstp,
return 0;
}
static u32 nfs4_ftypes[16] = {
NF4BAD, NF4FIFO, NF4CHR, NF4BAD,
NF4DIR, NF4BAD, NF4BLK, NF4BAD,
NF4REG, NF4BAD, NF4LNK, NF4BAD,
NF4SOCK, NF4BAD, NF4LNK, NF4BAD,
};
static u32 nfs4_file_type(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFIFO: return NF4FIFO;
case S_IFCHR: return NF4CHR;
case S_IFDIR: return NF4DIR;
case S_IFBLK: return NF4BLK;
case S_IFLNK: return NF4LNK;
case S_IFREG: return NF4REG;
case S_IFSOCK: return NF4SOCK;
default: return NF4BAD;
};
}
static __be32
nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
......@@ -1954,7 +2117,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
if (bmval0 & FATTR4_WORD0_TYPE) {
if ((buflen -= 4) < 0)
goto out_resource;
dummy = nfs4_ftypes[(stat.mode & S_IFMT) >> 12];
dummy = nfs4_file_type(stat.mode);
if (dummy == NF4BAD)
goto out_serverfault;
WRITE32(dummy);
......@@ -2488,7 +2651,7 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_c
if (!nfserr)
nfsd4_encode_stateid(resp, &close->cl_stateid);
ENCODE_SEQID_OP_TAIL(close->cl_stateowner);
encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
......@@ -2564,17 +2727,18 @@ nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh
static void
nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld)
{
struct xdr_netobj *conf = &ld->ld_owner;
__be32 *p;
RESERVE_SPACE(32 + XDR_LEN(ld->ld_sop ? ld->ld_sop->so_owner.len : 0));
RESERVE_SPACE(32 + XDR_LEN(conf->len));
WRITE64(ld->ld_start);
WRITE64(ld->ld_length);
WRITE32(ld->ld_type);
if (ld->ld_sop) {
if (conf->len) {
WRITEMEM(&ld->ld_clientid, 8);
WRITE32(ld->ld_sop->so_owner.len);
WRITEMEM(ld->ld_sop->so_owner.data, ld->ld_sop->so_owner.len);
kref_put(&ld->ld_sop->so_ref, nfs4_free_stateowner);
WRITE32(conf->len);
WRITEMEM(conf->data, conf->len);
kfree(conf->data);
} else { /* non - nfsv4 lock in conflict, no clientid nor owner */
WRITE64((u64)0); /* clientid */
WRITE32(0); /* length of owner name */
......@@ -2592,7 +2756,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
else if (nfserr == nfserr_denied)
nfsd4_encode_lock_denied(resp, &lock->lk_denied);
ENCODE_SEQID_OP_TAIL(lock->lk_replay_owner);
encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
......@@ -2612,7 +2776,7 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
if (!nfserr)
nfsd4_encode_stateid(resp, &locku->lu_stateid);
ENCODE_SEQID_OP_TAIL(locku->lu_stateowner);
encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
......@@ -2693,7 +2857,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
}
/* XXX save filehandle here */
out:
ENCODE_SEQID_OP_TAIL(open->op_stateowner);
encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
......@@ -2705,7 +2869,7 @@ nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct
if (!nfserr)
nfsd4_encode_stateid(resp, &oc->oc_resp_stateid);
ENCODE_SEQID_OP_TAIL(oc->oc_stateowner);
encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
......@@ -2717,7 +2881,7 @@ nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struc
if (!nfserr)
nfsd4_encode_stateid(resp, &od->od_stateid);
ENCODE_SEQID_OP_TAIL(od->od_stateowner);
encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
......@@ -2759,8 +2923,6 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen,
&maxcount);
if (nfserr == nfserr_symlink)
nfserr = nfserr_inval;
if (nfserr)
return nfserr;
eof = (read->rd_offset + maxcount >=
......@@ -2886,8 +3048,6 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
readdir->common.err == nfserr_toosmall &&
readdir->buffer == page)
nfserr = nfserr_toosmall;
if (nfserr == nfserr_symlink)
nfserr = nfserr_notdir;
if (nfserr)
goto err_no_verf;
......@@ -3218,9 +3378,9 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
WRITEMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN);
WRITE32(seq->seqid);
WRITE32(seq->slotid);
WRITE32(seq->maxslots);
/* For now: target_maxslots = maxslots */
WRITE32(seq->maxslots);
/* Note slotid's are numbered from zero: */
WRITE32(seq->maxslots - 1); /* sr_highest_slotid */
WRITE32(seq->maxslots - 1); /* sr_target_highest_slotid */
WRITE32(seq->status_flags);
ADJUST_ARGS();
......@@ -3233,6 +3393,7 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_test_stateid *test_stateid)
{
struct nfsd4_compoundargs *argp;
struct nfs4_client *cl = resp->cstate.session->se_client;
stateid_t si;
__be32 *p;
int i;
......@@ -3248,7 +3409,7 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
nfs4_lock_state();
for (i = 0; i < test_stateid->ts_num_ids; i++) {
nfsd4_decode_stateid(argp, &si);
valid = nfs4_validate_stateid(&si, test_stateid->ts_has_session);
valid = nfs4_validate_stateid(cl, &si);
RESERVE_SPACE(4);
*p++ = htonl(valid);
resp->p = p;
......@@ -3334,34 +3495,29 @@ static nfsd4_enc nfsd4_enc_ops[] = {
/*
* Calculate the total amount of memory that the compound response has taken
* after encoding the current operation.
* after encoding the current operation with pad.
*
* pad: add on 8 bytes for the next operation's op_code and status so that
* there is room to cache a failure on the next operation.
* pad: if the operation is non-idempotent, pad is calculated by op_rsize_bop(),
* which is specified in nfsd4_operation; otherwise pad is zero.
*
* Compare this length to the session se_fmaxresp_cached.
* Compare this length to the session se_fmaxresp_sz and se_fmaxresp_cached.
*
* Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so
* will be at least a page and will therefore hold the xdr_buf head.
*/
static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp)
int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
{
int status = 0;
struct xdr_buf *xb = &resp->rqstp->rq_res;
struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
struct nfsd4_session *session = NULL;
struct nfsd4_slot *slot = resp->cstate.slot;
u32 length, tlen = 0, pad = 8;
u32 length, tlen = 0;
if (!nfsd4_has_session(&resp->cstate))
return status;
return 0;
session = resp->cstate.session;
if (session == NULL || slot->sl_cachethis == 0)
return status;
if (resp->opcnt >= args->opcnt)
pad = 0; /* this is the last operation */
if (session == NULL)
return 0;
if (xb->page_len == 0) {
length = (char *)resp->p - (char *)xb->head[0].iov_base + pad;
......@@ -3374,10 +3530,14 @@ static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp)
dprintk("%s length %u, xb->page_len %u tlen %u pad %u\n", __func__,
length, xb->page_len, tlen, pad);
if (length <= session->se_fchannel.maxresp_cached)
return status;
else
if (length > session->se_fchannel.maxresp_sz)
return nfserr_rep_too_big;
if (slot->sl_cachethis == 1 &&
length > session->se_fchannel.maxresp_cached)
return nfserr_rep_too_big_to_cache;
return 0;
}
void
......@@ -3397,8 +3557,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
!nfsd4_enc_ops[op->opnum]);
op->status = nfsd4_enc_ops[op->opnum](resp, op->status, &op->u);
/* nfsd4_check_drc_limit guarantees enough room for error status */
if (!op->status && nfsd4_check_drc_limit(resp))
op->status = nfserr_rep_too_big_to_cache;
if (!op->status)
op->status = nfsd4_check_resp_size(resp, 0);
status:
/*
* Note: We write the status directly, instead of using WRITE32(),
......
......@@ -9,7 +9,6 @@
#include <linux/ctype.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
......
......@@ -11,13 +11,39 @@
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/nfsd/debug.h>
#include <linux/nfsd/export.h>
#include <linux/nfsd/stats.h>
/*
* nfsd version
*/
#define NFSD_SUPPORTED_MINOR_VERSION 1
/*
* Maximum blocksizes supported by daemon under various circumstances.
*/
#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
/* NFSv2 is limited by the protocol specification, see RFC 1094 */
#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
/*
* Largest number of bytes we need to allocate for an NFS
* call or reply. Used to control buffer sizes. We use
* the length of v3 WRITE, READDIR and READDIR replies
* which are an RPC header, up to 26 XDR units of reply
* data, and some page data.
*
* Note that accuracy here doesn't matter too much as the
* size is rounded up to a page size when allocating space.
*/
#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
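/*
 * Worked example (editorial sketch, not part of the patch): assuming XDR_UNIT
 * is 4 bytes and <linux/bug.h> is in scope, NFSD_BUFSIZE is the RPC
 * header-with-auth plus 26 XDR units (104 bytes) of fixed reply data plus one
 * full NFSSVC_MAXBLKSIZE payload, so it is always strictly larger than the
 * payload and fixed reply data alone:
 */
static inline void nfsd_bufsize_example(void)
{
	BUILD_BUG_ON(NFSD_BUFSIZE <= NFSSVC_MAXBLKSIZE + 26 * XDR_UNIT);
}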
struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
......@@ -335,6 +361,13 @@ static inline u32 nfsd_suppattrs2(u32 minorversion)
#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
NFSD_WRITEABLE_ATTRS_WORD2
extern int nfsd4_is_junction(struct dentry *dentry);
#else
static inline int nfsd4_is_junction(struct dentry *dentry)
{
return 0;
}
#endif /* CONFIG_NFSD_V4 */
#endif /* LINUX_NFSD_NFSD_H */
......@@ -59,28 +59,25 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int type)
nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int requested)
{
/* Type can be negative when creating hardlinks - not to a dir */
if (type > 0 && (mode & S_IFMT) != type) {
if (rqstp->rq_vers == 4 && (mode & S_IFMT) == S_IFLNK)
mode &= S_IFMT;
if (requested == 0) /* the caller doesn't care */
return nfs_ok;
if (mode == requested)
return nfs_ok;
/*
* v4 has an error more specific than err_notdir which we should
* return in preference to err_notdir:
*/
if (rqstp->rq_vers == 4 && mode == S_IFLNK)
return nfserr_symlink;
else if (type == S_IFDIR)
if (requested == S_IFDIR)
return nfserr_notdir;
else if ((mode & S_IFMT) == S_IFDIR)
if (mode == S_IFDIR)
return nfserr_isdir;
else
return nfserr_inval;
}
if (type < 0 && (mode & S_IFMT) == -type) {
if (rqstp->rq_vers == 4 && (mode & S_IFMT) == S_IFLNK)
return nfserr_symlink;
else if (type == -S_IFDIR)
return nfserr_isdir;
else
return nfserr_notdir;
}
return 0;
}
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
......
......@@ -35,6 +35,7 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
#include <linux/idr.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/nfsd/nfsfh.h>
#include "nfsfh.h"
......@@ -45,24 +46,20 @@ typedef struct {
} clientid_t;
typedef struct {
u32 so_boot;
u32 so_stateownerid;
u32 so_fileid;
clientid_t so_clid;
u32 so_id;
} stateid_opaque_t;
typedef struct {
u32 si_generation;
stateid_opaque_t si_opaque;
} stateid_t;
#define si_boot si_opaque.so_boot
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
#define STATEID_VAL(s) \
(s)->si_boot, \
(s)->si_stateownerid, \
(s)->si_fileid, \
(s)->si_opaque.so_clid.cl_boot, \
(s)->si_opaque.so_clid.cl_id, \
(s)->si_opaque.so_id, \
(s)->si_generation
struct nfsd4_callback {
......@@ -76,17 +73,27 @@ struct nfsd4_callback {
bool cb_done;
};
struct nfs4_stid {
#define NFS4_OPEN_STID 1
#define NFS4_LOCK_STID 2
#define NFS4_DELEG_STID 4
/* For an open stateid kept around *only* to process close replays: */
#define NFS4_CLOSED_STID 8
unsigned char sc_type;
stateid_t sc_stateid;
struct nfs4_client *sc_client;
};
struct nfs4_delegation {
struct nfs4_stid dl_stid; /* must be first field */
struct list_head dl_perfile;
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
atomic_t dl_count; /* ref count */
struct nfs4_client *dl_client;
struct nfs4_file *dl_file;
u32 dl_type;
time_t dl_time;
/* For recall: */
stateid_t dl_stateid;
struct knfsd_fh dl_fh;
int dl_retries;
struct nfsd4_callback dl_recall;
......@@ -104,6 +111,11 @@ struct nfs4_cb_conn {
struct svc_xprt *cb_xprt; /* minorversion 1 only */
};
static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
{
return container_of(s, struct nfs4_delegation, dl_stid);
}
/* Maximum number of slots per session. 160 is useful for long haul TCP */
#define NFSD_MAX_SLOTS_PER_SESSION 160
/* Maximum number of operations per session compound */
......@@ -220,6 +232,7 @@ struct nfs4_client {
struct list_head cl_idhash; /* hash by cl_clientid.id */
struct list_head cl_strhash; /* hash by cl_name */
struct list_head cl_openowners;
struct idr cl_stateids; /* stateid lookup */
struct list_head cl_delegations;
struct list_head cl_lru; /* tail queue */
struct xdr_netobj cl_name; /* id generated by client */
......@@ -245,6 +258,7 @@ struct nfs4_client {
#define NFSD4_CB_UP 0
#define NFSD4_CB_UNKNOWN 1
#define NFSD4_CB_DOWN 2
#define NFSD4_CB_FAULT 3
int cl_cb_state;
struct nfsd4_callback cl_cb_null;
struct nfsd4_session *cl_cb_session;
......@@ -293,6 +307,9 @@ static inline void
update_stateid(stateid_t *stateid)
{
stateid->si_generation++;
/* Wraparound recommendation from 3530bis-13 9.1.3.2: */
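	/* (Zero is skipped because, in NFSv4.1, a stateid seqid of zero has
	 * the special meaning "use the current seqid".) */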
if (stateid->si_generation == 0)
stateid->si_generation = 1;
}
/* A reasonable value for REPLAY_ISIZE was estimated as follows:
......@@ -312,49 +329,57 @@ struct nfs4_replay {
__be32 rp_status;
unsigned int rp_buflen;
char *rp_buf;
unsigned int rp_allocated;
struct knfsd_fh rp_openfh;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
/*
* nfs4_stateowner can either be an open_owner, or a lock_owner
*
* so_idhash: stateid_hashtbl[] for open owner, lockstateid_hashtbl[]
* for lock_owner
* so_strhash: ownerstr_hashtbl[] for open_owner, lock_ownerstr_hashtbl[]
* for lock_owner
* so_perclient: nfs4_client->cl_perclient entry - used when nfs4_client
* struct is reaped.
* so_perfilestate: heads the list of nfs4_stateid (either open or lock)
* and is used to ensure no dangling nfs4_stateid references when we
* release a stateowner.
* so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when
* close is called to reap associated byte-range locks
* so_close_lru: (open) stateowner is placed on this list instead of being
* reaped (when so_perfilestate is empty) to hold the last close replay.
reaped by the laundromat thread after the lease period.
*/
struct nfs4_stateowner {
struct kref so_ref;
struct list_head so_idhash; /* hash by so_id */
struct list_head so_strhash; /* hash by op_name */
struct list_head so_perclient;
struct list_head so_stateids;
struct list_head so_perstateid; /* for lockowners only */
struct list_head so_close_lru; /* tail queue */
time_t so_time; /* time of placement on so_close_lru */
int so_is_open_owner; /* 1=openowner,0=lockowner */
u32 so_id;
struct nfs4_client * so_client;
/* after increment in ENCODE_SEQID_OP_TAIL, represents the next
* sequence id expected from the client: */
u32 so_seqid;
struct xdr_netobj so_owner; /* open owner name */
int so_confirmed; /* successful OPEN_CONFIRM? */
struct nfs4_replay so_replay;
bool so_is_open_owner;
};
struct nfs4_openowner {
struct nfs4_stateowner oo_owner; /* must be first field */
struct list_head oo_perclient;
/*
* We keep around openowners a little while after last close,
* which saves clients from having to confirm, and allows us to
* handle close replays if they come soon enough. The close_lru
* is a list of such openowners, to be reaped by the laundromat
* thread eventually if they remain unused:
*/
struct list_head oo_close_lru;
struct nfs4_ol_stateid *oo_last_closed_stid;
time_t oo_time; /* time of placement on so_close_lru */
#define NFS4_OO_CONFIRMED 1
#define NFS4_OO_PURGE_CLOSE 2
#define NFS4_OO_NEW 4
unsigned char oo_flags;
};
struct nfs4_lockowner {
struct nfs4_stateowner lo_owner; /* must be first element */
struct list_head lo_perstateid; /* for lockowners only */
struct list_head lo_list; /* for temporary uses */
};
static inline struct nfs4_openowner * openowner(struct nfs4_stateowner *so)
{
return container_of(so, struct nfs4_openowner, oo_owner);
}
static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
{
return container_of(so, struct nfs4_lockowner, lo_owner);
}
/*
* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
* o fi_perfile list is used to search for conflicting
......@@ -368,17 +393,17 @@ struct nfs4_file {
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
/*
* Each open or lock stateid contributes 1 to either
* fi_access[O_RDONLY], fi_access[O_WRONLY], or both, depending
* on open or lock mode:
* Each open or lock stateid contributes 0-4 to the counts
* below depending on which bits are set in st_access_bitmap:
* 1 to fi_access[O_RDONLY] if NFS4_SHARE_ACCESS_READ is set
* + 1 to fi_access[O_WRONLY] if NFS4_SHARE_ACCESS_WRITE is set
* + 1 to both of the above if NFS4_SHARE_ACCESS_BOTH is set.
*/
atomic_t fi_access[2];
struct file *fi_deleg_file;
struct file_lock *fi_lease;
atomic_t fi_delegees;
struct inode *fi_inode;
u32 fi_id; /* used with stateowner->so_id
* for stateid_hashtbl hash */
bool fi_had_conflict;
};
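/*
 * Illustrative sketch (hypothetical helper, not part of the patch): how the
 * share-access bits of one stateid would bump the fi_access counters
 * described above; NFS4_SHARE_ACCESS_BOTH sets both bits, so both counters
 * are incremented.
 */
static inline void example_count_access(struct nfs4_file *fp, u32 share_access)
{
	if (share_access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
	if (share_access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
}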
......@@ -408,50 +433,27 @@ static inline struct file *find_any_file(struct nfs4_file *f)
return f->fi_fds[O_RDONLY];
}
/*
* nfs4_stateid can either be an open stateid or (eventually) a lock stateid
*
* (open)nfs4_stateid: one per (open)nfs4_stateowner, nfs4_file
*
* st_hash: stateid_hashtbl[] entry or lockstateid_hashtbl entry
* st_perfile: file_hashtbl[] entry.
* st_perfile_state: nfs4_stateowner->so_perfilestate
* st_perlockowner: (open stateid) list of lock nfs4_stateowners
* st_access_bmap: used only for open stateid
* st_deny_bmap: used only for open stateid
* st_openstp: open stateid lock stateid was derived from
*
* XXX: open stateids and lock stateids have diverged sufficiently that
* we should consider defining separate structs for the two cases.
*/
struct nfs4_stateid {
struct list_head st_hash;
/* "ol" stands for "Open or Lock". Better suggestions welcome. */
struct nfs4_ol_stateid {
struct nfs4_stid st_stid; /* must be first field */
struct list_head st_perfile;
struct list_head st_perstateowner;
struct list_head st_lockowners;
struct nfs4_stateowner * st_stateowner;
struct nfs4_file * st_file;
stateid_t st_stateid;
unsigned long st_access_bmap;
unsigned long st_deny_bmap;
struct nfs4_stateid * st_openstp;
struct nfs4_ol_stateid * st_openstp;
};
static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
{
return container_of(s, struct nfs4_ol_stateid, st_stid);
}
/* flags for preprocess_seqid_op() */
#define HAS_SESSION 0x00000001
#define CONFIRM 0x00000002
#define OPEN_STATE 0x00000004
#define LOCK_STATE 0x00000008
#define RD_STATE 0x00000010
#define WR_STATE 0x00000020
#define CLOSE_STATE 0x00000040
#define seqid_mutating_err(err) \
(((err) != nfserr_stale_clientid) && \
((err) != nfserr_bad_seqid) && \
((err) != nfserr_stale_stateid) && \
((err) != nfserr_bad_stateid))
struct nfsd4_compound_state;
......@@ -461,7 +463,8 @@ extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
extern void nfs4_free_stateowner(struct kref *kref);
extern void nfs4_free_openowner(struct nfs4_openowner *);
extern void nfs4_free_lockowner(struct nfs4_lockowner *);
extern int set_callback_cred(void);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
......@@ -473,7 +476,7 @@ extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern void nfsd4_init_recdir(char *recdir_name);
extern void nfsd4_init_recdir(void);
extern int nfsd4_recdir_load(void);
extern void nfsd4_shutdown_recdir(void);
extern int nfs4_client_to_reclaim(const char *name);
......@@ -482,18 +485,7 @@ extern void nfsd4_recdir_purge_old(void);
extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
extern void release_session_client(struct nfsd4_session *);
extern __be32 nfs4_validate_stateid(stateid_t *, int);
static inline void
nfs4_put_stateowner(struct nfs4_stateowner *so)
{
kref_put(&so->so_ref, nfs4_free_stateowner);
}
static inline void
nfs4_get_stateowner(struct nfs4_stateowner *so)
{
kref_get(&so->so_ref);
}
extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *);
extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
#endif /* NFSD4_STATE_H */
......@@ -168,6 +168,8 @@ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
{
if (d_mountpoint(dentry))
return 1;
if (nfsd4_is_junction(dentry))
return 1;
if (!(exp->ex_flags & NFSEXP_V4ROOT))
return 0;
return dentry->d_inode != NULL;
......@@ -502,7 +504,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int flags = 0;
/* Get inode */
error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, NFSD_MAY_SATTR);
error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR);
if (error)
return error;
......@@ -592,6 +594,22 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_ac
return error;
}
#define NFSD_XATTR_JUNCTION_PREFIX XATTR_TRUSTED_PREFIX "junction."
#define NFSD_XATTR_JUNCTION_TYPE NFSD_XATTR_JUNCTION_PREFIX "type"
int nfsd4_is_junction(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
if (inode == NULL)
return 0;
if (inode->i_mode & S_IXUGO)
return 0;
if (!(inode->i_mode & S_ISVTX))
return 0;
if (vfs_getxattr(dentry, NFSD_XATTR_JUNCTION_TYPE, NULL, 0) <= 0)
return 0;
return 1;
}
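/*
 * Editorial note (assumption, not from the patch): per the checks above, a
 * directory is treated as a junction when it has no execute bits, has the
 * sticky bit set, and carries a "trusted.junction.type" xattr; how that
 * xattr gets populated is left to the administrative tooling.
 */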
#endif /* defined(CONFIG_NFSD_V4) */
#ifdef CONFIG_NFSD_V3
......@@ -1352,7 +1370,7 @@ __be32
do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
struct svc_fh *resfhp, int createmode, u32 *verifier,
int *truncp, int *created)
bool *truncp, bool *created)
{
struct dentry *dentry, *dchild = NULL;
struct inode *dirp;
......@@ -1632,10 +1650,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
err = fh_verify(rqstp, tfhp, -S_IFDIR, NFSD_MAY_NOP);
err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
if (err)
goto out;
err = nfserr_isdir;
if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
goto out;
err = nfserr_perm;
if (!len)
goto out;
......@@ -2114,7 +2134,8 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
/* Allow read access to binaries even when mode 111 */
if (err == -EACCES && S_ISREG(inode->i_mode) &&
acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE))
(acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
err = inode_permission(inode, MAY_EXEC);
return err? nfserrno(err) : 0;
......
......@@ -11,20 +11,21 @@
* Flags for nfsd_permission
*/
#define NFSD_MAY_NOP 0
#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
#define NFSD_MAY_READ 4 /* == MAY_READ */
#define NFSD_MAY_SATTR 8
#define NFSD_MAY_TRUNC 16
#define NFSD_MAY_LOCK 32
#define NFSD_MAY_MASK 63
#define NFSD_MAY_EXEC 0x001 /* == MAY_EXEC */
#define NFSD_MAY_WRITE 0x002 /* == MAY_WRITE */
#define NFSD_MAY_READ 0x004 /* == MAY_READ */
#define NFSD_MAY_SATTR 0x008
#define NFSD_MAY_TRUNC 0x010
#define NFSD_MAY_LOCK 0x020
#define NFSD_MAY_MASK 0x03f
/* extra hints to permission and open routines: */
#define NFSD_MAY_OWNER_OVERRIDE 64
#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
#define NFSD_MAY_NOT_BREAK_LEASE 512
#define NFSD_MAY_BYPASS_GSS 1024
#define NFSD_MAY_OWNER_OVERRIDE 0x040
#define NFSD_MAY_LOCAL_ACCESS 0x080 /* for device special files */
#define NFSD_MAY_BYPASS_GSS_ON_ROOT 0x100
#define NFSD_MAY_NOT_BREAK_LEASE 0x200
#define NFSD_MAY_BYPASS_GSS 0x400
#define NFSD_MAY_READ_IF_EXEC 0x800
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
......@@ -61,7 +62,7 @@ __be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
__be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
struct svc_fh *res, int createmode,
u32 *verifier, int *truncp, int *created);
u32 *verifier, bool *truncp, bool *created);
__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
loff_t, unsigned long);
#endif /* CONFIG_NFSD_V3 */
......
......@@ -81,7 +81,6 @@ struct nfsd4_access {
struct nfsd4_close {
u32 cl_seqid; /* request */
stateid_t cl_stateid; /* request+response */
struct nfs4_stateowner * cl_stateowner; /* response */
};
struct nfsd4_commit {
......@@ -131,7 +130,7 @@ struct nfsd4_link {
struct nfsd4_lock_denied {
clientid_t ld_clientid;
struct nfs4_stateowner *ld_sop;
struct xdr_netobj ld_owner;
u64 ld_start;
u64 ld_length;
u32 ld_type;
......@@ -165,9 +164,6 @@ struct nfsd4_lock {
} ok;
struct nfsd4_lock_denied denied;
} u;
/* The lk_replay_owner is the open owner in the open_to_lock_owner
* case and the lock owner otherwise: */
struct nfs4_stateowner *lk_replay_owner;
};
#define lk_new_open_seqid v.new.open_seqid
#define lk_new_open_stateid v.new.open_stateid
......@@ -188,7 +184,6 @@ struct nfsd4_lockt {
struct xdr_netobj lt_owner;
u64 lt_offset;
u64 lt_length;
struct nfs4_stateowner * lt_stateowner;
struct nfsd4_lock_denied lt_denied;
};
......@@ -199,7 +194,6 @@ struct nfsd4_locku {
stateid_t lu_stateid;
u64 lu_offset;
u64 lu_length;
struct nfs4_stateowner *lu_stateowner;
};
......@@ -232,8 +226,11 @@ struct nfsd4_open {
u32 op_recall; /* recall */
struct nfsd4_change_info op_cinfo; /* response */
u32 op_rflags; /* response */
int op_truncate; /* used during processing */
struct nfs4_stateowner *op_stateowner; /* used during processing */
bool op_truncate; /* used during processing */
bool op_created; /* used during processing */
struct nfs4_openowner *op_openowner; /* used during processing */
struct nfs4_file *op_file; /* used during processing */
struct nfs4_ol_stateid *op_stp; /* used during processing */
struct nfs4_acl *op_acl;
};
#define op_iattr iattr
......@@ -243,7 +240,6 @@ struct nfsd4_open_confirm {
stateid_t oc_req_stateid /* request */;
u32 oc_seqid /* request */;
stateid_t oc_resp_stateid /* response */;
struct nfs4_stateowner * oc_stateowner; /* response */
};
struct nfsd4_open_downgrade {
......@@ -251,7 +247,6 @@ struct nfsd4_open_downgrade {
u32 od_seqid;
u32 od_share_access;
u32 od_share_deny;
struct nfs4_stateowner *od_stateowner;
};
......@@ -325,8 +320,7 @@ struct nfsd4_setattr {
struct nfsd4_setclientid {
nfs4_verifier se_verf; /* request */
u32 se_namelen; /* request */
char * se_name; /* request */
struct xdr_netobj se_name;
u32 se_callback_prog; /* request */
u32 se_callback_netid_len; /* request */
char * se_callback_netid_val; /* request */
......@@ -351,7 +345,6 @@ struct nfsd4_saved_compoundargs {
struct nfsd4_test_stateid {
__be32 ts_num_ids;
__be32 ts_has_session;
struct nfsd4_compoundargs *ts_saved_args;
struct nfsd4_saved_compoundargs ts_savedp;
};
......@@ -405,6 +398,10 @@ struct nfsd4_destroy_session {
struct nfs4_sessionid sessionid;
};
struct nfsd4_destroy_clientid {
clientid_t clientid;
};
struct nfsd4_reclaim_complete {
u32 rca_one_fs;
};
......@@ -532,6 +529,7 @@ int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
struct nfsd4_compoundargs *);
int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
struct nfsd4_compoundres *);
int nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
......@@ -558,11 +556,13 @@ extern __be32 nfsd4_sequence(struct svc_rqst *,
extern __be32 nfsd4_destroy_session(struct svc_rqst *,
struct nfsd4_compound_state *,
struct nfsd4_destroy_session *);
extern __be32 nfsd4_destroy_clientid(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_destroy_clientid *);
__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_reclaim_complete *);
extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
struct nfsd4_open *open);
extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
struct svc_fh *current_fh, struct nfsd4_open *open);
extern void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status);
extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
extern __be32 nfsd4_close(struct svc_rqst *rqstp,
......
......@@ -145,11 +145,6 @@ struct f_owner_ex {
#define F_SHLCK 8 /* or 4 */
#endif
/* for leases */
#ifndef F_INPROGRESS
#define F_INPROGRESS 16
#endif
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
......
......@@ -1063,6 +1063,8 @@ static inline int file_check_writeable(struct file *filp)
#define FL_LEASE 32 /* lease held on this file */
#define FL_CLOSE 64 /* unlock on close */
#define FL_SLEEP 128 /* A blocking lock */
#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
/*
* Special return value from posix_lock_file() and vfs_lock_file() for
......@@ -1109,7 +1111,7 @@ struct file_lock {
struct list_head fl_link; /* doubly linked list of all locks */
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
unsigned char fl_flags;
unsigned int fl_flags;
unsigned char fl_type;
unsigned int fl_pid;
struct pid *fl_nspid;
......@@ -1119,7 +1121,9 @@ struct file_lock {
loff_t fl_end;
struct fasync_struct * fl_fasync; /* for lease break notifications */
unsigned long fl_break_time; /* for nonblocking lease breaks */
/* for lease breaks: */
unsigned long fl_break_time;
unsigned long fl_downgrade_time;
const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
......
......@@ -373,6 +373,22 @@ enum nfsstat4 {
NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
};
static inline bool seqid_mutating_err(u32 err)
{
/* rfc 3530 section 8.1.5: */
switch (err) {
case NFS4ERR_STALE_CLIENTID:
case NFS4ERR_STALE_STATEID:
case NFS4ERR_BAD_STATEID:
case NFS4ERR_BAD_SEQID:
case NFS4ERR_BADXDR:
case NFS4ERR_RESOURCE:
case NFS4ERR_NOFILEHANDLE:
return false;
};
return true;
}
/*
* Note: NF4BAD is not actually part of the protocol; it is just used
* internally by nfsd.
......@@ -394,7 +410,10 @@ enum open_claim_type4 {
NFS4_OPEN_CLAIM_NULL = 0,
NFS4_OPEN_CLAIM_PREVIOUS = 1,
NFS4_OPEN_CLAIM_DELEGATE_CUR = 2,
NFS4_OPEN_CLAIM_DELEGATE_PREV = 3
NFS4_OPEN_CLAIM_DELEGATE_PREV = 3,
NFS4_OPEN_CLAIM_FH = 4, /* 4.1 */
NFS4_OPEN_CLAIM_DELEG_CUR_FH = 5, /* 4.1 */
NFS4_OPEN_CLAIM_DELEG_PREV_FH = 6, /* 4.1 */
};
enum opentype4 {
......
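For context only: a minimal caller sketch, not taken from this commit, showing the usual way the seqid_mutating_err() helper added above gets consulted. The nfsd4_bump_seqid() name and the bare u32 seqid pointer are illustrative assumptions, not real nfsd symbols.

/* Illustrative only: advance an owner's seqid only when the reply status
 * is seqid-mutating per RFC 3530 section 8.1.5 (helper defined above). */
static void nfsd4_bump_seqid(u32 *seqid, u32 nfserr)
{
	if (seqid_mutating_err(nfserr))
		(*seqid)++;		/* u32 arithmetic wraps as intended */
}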
header-y += const.h
header-y += debug.h
header-y += export.h
header-y += nfsfh.h
header-y += stats.h
header-y += syscall.h
/*
* include/linux/nfsd/const.h
*
* Various constants related to NFS.
*
* Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
*/
#ifndef _LINUX_NFSD_CONST_H
#define _LINUX_NFSD_CONST_H
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
/*
* Maximum protocol version supported by knfsd
*/
#define NFSSVC_MAXVERS 3
/*
* Maximum blocksizes supported by daemon under various circumstances.
*/
#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
/* NFSv2 is limited by the protocol specification, see RFC 1094 */
#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
#ifdef __KERNEL__
#include <linux/sunrpc/msg_prot.h>
/*
* Largest number of bytes we need to allocate for an NFS
* call or reply. Used to control buffer sizes. We use
* the length of v3 WRITE, READDIR and READDIR replies
* which are an RPC header, up to 26 XDR units of reply
* data, and some page data.
*
* Note that accuracy here doesn't matter too much as the
* size is rounded up to a page size when allocating space.
*/
#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
#ifdef CONFIG_NFSD_V4
# define NFSSVC_XDRSIZE NFS4_SVC_XDRSIZE
#elif defined(CONFIG_NFSD_V3)
# define NFSSVC_XDRSIZE NFS3_SVC_XDRSIZE
#else
# define NFSSVC_XDRSIZE NFS2_SVC_XDRSIZE
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_NFSD_CONST_H */
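A back-of-the-envelope reading of the NFSD_BUFSIZE formula above, using placeholder figures rather than the real sunrpc constants (RPC_MAX_HEADER_WITH_AUTH and RPCSVC_MAXPAYLOAD come from the sunrpc headers):

/* Placeholder arithmetic only -- not the kernel's actual constant values.
 * With an RPC header allowance of ~100 XDR units and a 1 MB maximum
 * payload:
 *
 *	(100 + 26) * 4 + 1048576 = 1049080 bytes
 *
 * The allocator rounds this up to whole pages anyway, so the exact header
 * figure is not critical -- which is the point of the comment above. */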
......@@ -96,7 +96,6 @@ struct svc_export {
struct auth_domain * ex_client;
int ex_flags;
struct path ex_path;
char *ex_pathname;
uid_t ex_anon_uid;
gid_t ex_anon_gid;
int ex_fsid;
......@@ -137,6 +136,7 @@ struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
struct path *);
struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *);
int exp_rootfh(struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
......
......@@ -14,11 +14,14 @@
#ifndef _LINUX_NFSD_FH_H
#define _LINUX_NFSD_FH_H
# include <linux/types.h>
#include <linux/types.h>
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#ifdef __KERNEL__
# include <linux/sunrpc/svc.h>
#endif
#include <linux/nfsd/const.h>
/*
* This is the old "dentry style" Linux NFSv2 file handle.
......
/*
* include/linux/nfsd/syscall.h
*
* This file holds all declarations for the knfsd syscall interface.
*
* Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
*/
#ifndef NFSD_SYSCALL_H
#define NFSD_SYSCALL_H
#include <linux/types.h>
#include <linux/nfsd/export.h>
/*
* Version of the syscall interface
*/
#define NFSCTL_VERSION 0x0201
/*
* These are the commands understood by nfsctl().
*/
#define NFSCTL_SVC 0 /* This is a server process. */
#define NFSCTL_ADDCLIENT 1 /* Add an NFS client. */
#define NFSCTL_DELCLIENT 2 /* Remove an NFS client. */
#define NFSCTL_EXPORT 3 /* export a file system. */
#define NFSCTL_UNEXPORT 4 /* unexport a file system. */
/*#define NFSCTL_UGIDUPDATE 5 / * update a client's uid/gid map. DISCARDED */
/*#define NFSCTL_GETFH 6 / * get an fh by ino DISCARDED */
#define NFSCTL_GETFD 7 /* get an fh by path (used by mountd) */
#define NFSCTL_GETFS 8 /* get an fh by path with max FH len */
/* SVC */
struct nfsctl_svc {
unsigned short svc_port;
int svc_nthreads;
};
/* ADDCLIENT/DELCLIENT */
struct nfsctl_client {
char cl_ident[NFSCLNT_IDMAX+1];
int cl_naddr;
struct in_addr cl_addrlist[NFSCLNT_ADDRMAX];
int cl_fhkeytype;
int cl_fhkeylen;
unsigned char cl_fhkey[NFSCLNT_KEYMAX];
};
/* EXPORT/UNEXPORT */
struct nfsctl_export {
char ex_client[NFSCLNT_IDMAX+1];
char ex_path[NFS_MAXPATHLEN+1];
__kernel_old_dev_t ex_dev;
__kernel_ino_t ex_ino;
int ex_flags;
__kernel_uid_t ex_anon_uid;
__kernel_gid_t ex_anon_gid;
};
/* GETFD */
struct nfsctl_fdparm {
struct sockaddr gd_addr;
char gd_path[NFS_MAXPATHLEN+1];
int gd_version;
};
/* GETFS - GET Filehandle with Size */
struct nfsctl_fsparm {
struct sockaddr gd_addr;
char gd_path[NFS_MAXPATHLEN+1];
int gd_maxlen;
};
/*
* This is the argument union.
*/
struct nfsctl_arg {
int ca_version; /* safeguard */
union {
struct nfsctl_svc u_svc;
struct nfsctl_client u_client;
struct nfsctl_export u_export;
struct nfsctl_fdparm u_getfd;
struct nfsctl_fsparm u_getfs;
/*
* The following dummy member is needed to preserve binary compatibility
* on platforms where alignof(void*)>alignof(int). It's needed because
* this union used to contain a member (u_umap) which contained a
* pointer.
*/
void *u_ptr;
} u;
#define ca_svc u.u_svc
#define ca_client u.u_client
#define ca_export u.u_export
#define ca_getfd u.u_getfd
#define ca_getfs u.u_getfs
};
union nfsctl_res {
__u8 cr_getfh[NFS_FHSIZE];
struct knfsd_fh cr_getfs;
};
#ifdef __KERNEL__
/*
* Kernel syscall implementation.
*/
extern int exp_addclient(struct nfsctl_client *ncp);
extern int exp_delclient(struct nfsctl_client *ncp);
extern int exp_export(struct nfsctl_export *nxp);
extern int exp_unexport(struct nfsctl_export *nxp);
#endif /* __KERNEL__ */
#endif /* NFSD_SYSCALL_H */
......@@ -218,7 +218,13 @@ static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
{
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
return false;
else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
return sin1->sin6_scope_id == sin2->sin6_scope_id;
return true;
}
static inline bool __rpc_copy_addr6(struct sockaddr *dst,
......
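The extra scope-id comparison above matters because two different peers can legitimately present the same fe80:: link-local address on different interfaces. A small self-contained illustration in user-space C (not from this commit; the addresses and scope ids are made up):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <string.h>

/* Same link-local address, two different links (scope ids 2 and 3). */
static bool looks_like_same_endpoint(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6, .sin6_scope_id = 2 };
	struct sockaddr_in6 b = { .sin6_family = AF_INET6, .sin6_scope_id = 3 };

	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
	inet_pton(AF_INET6, "fe80::1", &b.sin6_addr);

	/* The address bytes match, but for link-local addresses the scope id
	 * must match too; the patched __rpc_cmp_addr6() above treats these as
	 * different endpoints, and so does this check. */
	return memcmp(&a.sin6_addr, &b.sin6_addr, sizeof(a.sin6_addr)) == 0 &&
	       a.sin6_scope_id == b.sin6_scope_id;	/* false here */
}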
......@@ -212,11 +212,6 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
iov->iov_len += sizeof(__be32);
}
union svc_addr_u {
struct in_addr addr;
struct in6_addr addr6;
};
/*
* The context of a single thread, including the request currently being
* processed.
......@@ -225,8 +220,12 @@ struct svc_rqst {
struct list_head rq_list; /* idle list */
struct list_head rq_all; /* all threads list */
struct svc_xprt * rq_xprt; /* transport ptr */
struct sockaddr_storage rq_addr; /* peer address */
size_t rq_addrlen;
struct sockaddr_storage rq_daddr; /* dest addr of request
* - reply from here */
size_t rq_daddrlen;
struct svc_serv * rq_server; /* RPC service definition */
struct svc_pool * rq_pool; /* thread pool */
......@@ -255,9 +254,6 @@ struct svc_rqst {
unsigned short
rq_secure : 1; /* secure port */
union svc_addr_u rq_daddr; /* dest addr of request
* - reply from here */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
void * rq_auth_data; /* flavor-specific data */
......@@ -300,6 +296,21 @@ static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_addr;
}
static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
return (struct sockaddr_in *) &rqst->rq_daddr;
}
static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
return (struct sockaddr_in6 *) &rqst->rq_daddr;
}
static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
return (struct sockaddr *) &rqst->rq_daddr;
}
/*
* Check buffer bounds after decoding arguments
*/
......@@ -340,7 +351,8 @@ struct svc_deferred_req {
struct svc_xprt *xprt;
struct sockaddr_storage addr; /* where reply must go */
size_t addrlen;
union svc_addr_u daddr; /* where reply must come from */
struct sockaddr_storage daddr; /* where reply must come from */
size_t daddrlen;
struct cache_deferred_req handle;
size_t xprt_hlen;
int argslen;
......@@ -404,7 +416,7 @@ struct svc_procedure {
struct svc_serv *svc_create(struct svc_program *, unsigned int,
void (*shutdown)(struct svc_serv *));
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
struct svc_pool *pool);
struct svc_pool *pool, int node);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
void (*shutdown)(struct svc_serv *),
......
......@@ -1084,3 +1084,6 @@ void unregister_rpc_pipefs(void)
kmem_cache_destroy(rpc_inode_cachep);
unregister_filesystem(&rpc_pipe_fs_type);
}
/* Make 'mount -t rpc_pipefs ...' autoload this module. */
MODULE_ALIAS("rpc_pipefs");
......@@ -295,6 +295,18 @@ svc_pool_map_put(void)
}
static int svc_pool_map_get_node(unsigned int pidx)
{
const struct svc_pool_map *m = &svc_pool_map;
if (m->count) {
if (m->mode == SVC_POOL_PERCPU)
return cpu_to_node(m->pool_to[pidx]);
if (m->mode == SVC_POOL_PERNODE)
return m->pool_to[pidx];
}
return NUMA_NO_NODE;
}
/*
* Set the given thread's cpus_allowed mask so that it
* will only run on cpus in the given pool.
......@@ -499,7 +511,7 @@ EXPORT_SYMBOL_GPL(svc_destroy);
* We allocate pages and place them in rq_argpages.
*/
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
unsigned int pages, arghi;
......@@ -513,7 +525,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
arghi = 0;
BUG_ON(pages > RPCSVC_MAXPAGES);
while (pages) {
struct page *p = alloc_page(GFP_KERNEL);
struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
if (!p)
break;
rqstp->rq_pages[arghi++] = p;
......@@ -536,11 +548,11 @@ svc_release_buffer(struct svc_rqst *rqstp)
}
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
struct svc_rqst *rqstp;
rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
if (!rqstp)
goto out_enomem;
......@@ -554,15 +566,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_argp)
goto out_thread;
rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_resp)
goto out_thread;
if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
goto out_thread;
return rqstp;
......@@ -647,6 +659,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
struct svc_pool *chosen_pool;
int error = 0;
unsigned int state = serv->sv_nrthreads-1;
int node;
if (pool == NULL) {
/* The -1 assumes caller has done a svc_get() */
......@@ -662,14 +675,16 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
nrservs--;
chosen_pool = choose_pool(serv, pool, &state);
rqstp = svc_prepare_thread(serv, chosen_pool);
node = svc_pool_map_get_node(chosen_pool->sp_id);
rqstp = svc_prepare_thread(serv, chosen_pool, node);
if (IS_ERR(rqstp)) {
error = PTR_ERR(rqstp);
break;
}
__module_get(serv->sv_module);
task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
task = kthread_create_on_node(serv->sv_function, rqstp,
node, serv->sv_name);
if (IS_ERR(task)) {
error = PTR_ERR(task);
module_put(serv->sv_module);
......
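The svc.c changes above all follow one pattern: resolve the NUMA node backing the chosen pool once (svc_pool_map_get_node()), then thread that node through every per-thread allocation and through kthread creation so a thread's request state stays node-local. A generic sketch of the same idiom with made-up names (struct foo and foo_alloc_on() are not kernel symbols):

#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical example mirroring svc_prepare_thread()/svc_init_buffer():
 * every allocation takes the caller-supplied NUMA node. */
struct foo {
	struct page *page;
	void *scratch;
};

static struct foo *foo_alloc_on(int node)
{
	struct foo *f = kzalloc_node(sizeof(*f), GFP_KERNEL, node);

	if (!f)
		return NULL;
	f->scratch = kmalloc_node(PAGE_SIZE, GFP_KERNEL, node);
	f->page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!f->scratch || !f->page) {
		if (f->page)
			__free_page(f->page);
		kfree(f->scratch);
		kfree(f);
		return NULL;
	}
	return f;	/* the same node is then handed to kthread_create_on_node() */
}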
......@@ -254,8 +254,6 @@ EXPORT_SYMBOL_GPL(svc_create_xprt);
*/
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
struct sockaddr *sin;
memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
rqstp->rq_addrlen = xprt->xpt_remotelen;
......@@ -263,15 +261,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
* Destination address in request is needed for binding the
* source address in RPC replies/callbacks later.
*/
sin = (struct sockaddr *)&xprt->xpt_local;
switch (sin->sa_family) {
case AF_INET:
rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
break;
case AF_INET6:
rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
break;
}
memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
......
......@@ -143,19 +143,20 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
cmh->cmsg_level = SOL_IP;
cmh->cmsg_type = IP_PKTINFO;
pki->ipi_ifindex = 0;
pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
pki->ipi_spec_dst.s_addr =
svc_daddr_in(rqstp)->sin_addr.s_addr;
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
case AF_INET6: {
struct in6_pktinfo *pki = CMSG_DATA(cmh);
struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
cmh->cmsg_level = SOL_IPV6;
cmh->cmsg_type = IPV6_PKTINFO;
pki->ipi6_ifindex = 0;
ipv6_addr_copy(&pki->ipi6_addr,
&rqstp->rq_daddr.addr6);
pki->ipi6_ifindex = daddr->sin6_scope_id;
ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
......@@ -498,9 +499,13 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
struct cmsghdr *cmh)
{
struct in_pktinfo *pki = CMSG_DATA(cmh);
struct sockaddr_in *daddr = svc_daddr_in(rqstp);
if (cmh->cmsg_type != IP_PKTINFO)
return 0;
rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
daddr->sin_family = AF_INET;
daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
return 1;
}
......@@ -511,9 +516,14 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
struct cmsghdr *cmh)
{
struct in6_pktinfo *pki = CMSG_DATA(cmh);
struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
if (cmh->cmsg_type != IPV6_PKTINFO)
return 0;
ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
daddr->sin6_family = AF_INET6;
ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
daddr->sin6_scope_id = pki->ipi6_ifindex;
return 1;
}
......@@ -614,6 +624,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
if (skb_is_nonlinear(skb)) {
/* we have to copy */
......
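The svcsock.c hunks above pull the datagram's destination address out of IP_PKTINFO / IPV6_PKTINFO ancillary data and now record the IPv6 scope id as well. For orientation, a rough user-space analogue of the IPv4 side (not part of the commit; it assumes IP_PKTINFO was already enabled on the socket via setsockopt()):

#define _GNU_SOURCE		/* struct in_pktinfo */
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Receive one datagram and report the local address it was sent to --
 * the same information the kernel code above copies into rq_daddr. */
static ssize_t recv_with_daddr(int sock, void *buf, size_t len,
			       struct in_addr *daddr)
{
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	ssize_t n = recvmsg(sock, &msg, 0);

	if (n < 0)
		return n;
	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_PKTINFO) {
			const struct in_pktinfo *pki = (const void *)CMSG_DATA(c);

			*daddr = pki->ipi_addr;	/* header destination address */
		}
	}
	return n;
}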