Commit 68353984 authored by Linus Torvalds

Merge tag '5.6-smb3-fixes-and-dfs-and-readdir-improvements' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs updates from Steve French:
 "Various SMB3/CIFS fixes including four for stable.

   - Improvement to fallocate (enables 3 additional xfstests)

   - Fix for file creation when mounting with modefromsid

   - Add ability to back up/restore dos attributes and creation time

   - DFS failover and reconnect fixes

   - Performance optimization for readdir

  Note that due to the upcoming SMB3 Test Event (at SNIA SDC next week)
  there will likely be more changesets near the end of the merge window
  (since we will be testing heavily next week, I held off on some
  patches and I expect some additional multichannel patches as well as
  patches to enable some additional xfstests)"
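
For context on the fallocate item: mode 0 is plain preallocation (no FALLOC_FL_KEEP_SIZE, no hole punching), which cifs.ko previously did not support for non-sparse files (see the shortlog entry below). A minimal userspace sketch of the call this serves (ordinary POSIX usage, not code from this series):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            int fd;

            if (argc != 2)
                    return EXIT_FAILURE;
            fd = open(argv[1], O_RDWR | O_CREAT, 0644);
            /* mode 0: preallocate and, unlike KEEP_SIZE, extend i_size */
            if (fd < 0 || fallocate(fd, 0, 0, 1024 * 1024) < 0) {
                    perror("fallocate");
                    return EXIT_FAILURE;
            }
            return EXIT_SUCCESS;
    }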

* tag '5.6-smb3-fixes-and-dfs-and-readdir-improvements' of git://git.samba.org/sfrench/cifs-2.6: (24 commits)
  CIFS: Fix task struct use-after-free on reconnect
  cifs: use PTR_ERR_OR_ZERO() to simplify code
  cifs: add support for fallocate mode 0 for non-sparse files
  cifs: fix NULL dereference in match_prepath
  smb3: fix default permissions on new files when mounting with modefromsid
  CIFS: Add support for setting owner info, dos attributes, and create time
  cifs: remove set but not used variable 'server'
  cifs: Fix memory allocation in __smb2_handle_cancelled_cmd()
  cifs: Fix mount options set in automount
  cifs: fix uninitialized variable potential problem with network I/O cache lock patch
  cifs: Fix return value in __update_cache_entry
  cifs: Avoid doing network I/O while holding cache lock
  cifs: Fix potential deadlock when updating vol in cifs_reconnect()
  cifs: Merge is_path_valid() into get_normalized_path()
  cifs: Introduce helpers for finding TCP connection
  cifs: Get rid of kstrdup_const()'d paths
  cifs: Clean up DFS referral cache
  cifs: Don't use iov_iter::type directly
  cifs: set correct max-buffer-size for smb2_ioctl_init()
  cifs: use compounding for open and first query-dir for readdir()
  ...
parents c8994374 f1f27ad7
......@@ -120,17 +120,17 @@ cifs_build_devname(char *nodename, const char *prepath)
/**
* cifs_compose_mount_options - creates mount options for refferral
* cifs_compose_mount_options - creates mount options for referral
* @sb_mountdata: parent/root DFS mount options (template)
* @fullpath: full path in UNC format
* @ref: server's referral
* @ref: optional server's referral
* @devname: optional pointer for saving device name
*
* creates mount options for submount based on template options sb_mountdata
and replacing unc,ip,prefixpath options with ones we've got from ref_unc.
*
* Returns: pointer to new mount options or ERR_PTR.
* Caller is responcible for freeing retunrned value if it is not error.
* Caller is responsible for freeing returned value if it is not error.
*/
char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath,
......@@ -150,18 +150,27 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
if (sb_mountdata == NULL)
return ERR_PTR(-EINVAL);
if (strlen(fullpath) - ref->path_consumed) {
prepath = fullpath + ref->path_consumed;
/* skip initial delimiter */
if (*prepath == '/' || *prepath == '\\')
prepath++;
}
if (ref) {
if (strlen(fullpath) - ref->path_consumed) {
prepath = fullpath + ref->path_consumed;
/* skip initial delimiter */
if (*prepath == '/' || *prepath == '\\')
prepath++;
}
name = cifs_build_devname(ref->node_name, prepath);
if (IS_ERR(name)) {
rc = PTR_ERR(name);
name = NULL;
goto compose_mount_options_err;
name = cifs_build_devname(ref->node_name, prepath);
if (IS_ERR(name)) {
rc = PTR_ERR(name);
name = NULL;
goto compose_mount_options_err;
}
} else {
name = cifs_build_devname((char *)fullpath, NULL);
if (IS_ERR(name)) {
rc = PTR_ERR(name);
name = NULL;
goto compose_mount_options_err;
}
}
rc = dns_resolve_server_name_to_ip(name, &srvIP);
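
With @ref now optional, the function has two call shapes; a condensed sketch (argument values taken from the callers visible in this diff):

    /* 1) submount built from a referral target (ref->node_name + prepath) */
    mountdata = cifs_compose_mount_options(sb_mountdata, fullpath, ref, NULL);

    /* 2) no referral: the device name is rebuilt from @fullpath alone,
     *    as cifs_dfs_do_mount() below does for DFS failover mounts */
    mountdata = cifs_compose_mount_options(sb_mountdata, fullpath + 1, NULL, NULL);
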
......@@ -225,6 +234,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
if (devname)
*devname = name;
else
kfree(name);
/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
......@@ -241,23 +252,23 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
}
/**
* cifs_dfs_do_refmount - mounts specified path using provided refferal
* cifs_dfs_do_mount - mounts specified path using DFS full path
*
* Always pass down @fullpath to smb3_do_mount() so we can use the root server
* to perform failover in case we failed to connect to the first target in the
* referral.
*
* @cifs_sb: parent/root superblock
* @fullpath: full path in UNC format
* @ref: server's referral
*/
static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
struct cifs_sb_info *cifs_sb,
const char *fullpath, const struct dfs_info3_param *ref)
static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
struct cifs_sb_info *cifs_sb,
const char *fullpath)
{
struct vfsmount *mnt;
char *mountdata;
char *devname;
/*
* Always pass down the DFS full path to smb3_do_mount() so we
* can use it later for failover.
*/
devname = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
if (!devname)
return ERR_PTR(-ENOMEM);
......@@ -266,7 +277,7 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
/* strip first '\' from fullpath */
mountdata = cifs_compose_mount_options(cifs_sb->mountdata,
fullpath + 1, ref, NULL);
fullpath + 1, NULL, NULL);
if (IS_ERR(mountdata)) {
kfree(devname);
return (struct vfsmount *)mountdata;
......@@ -278,28 +289,16 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
return mnt;
}
static void dump_referral(const struct dfs_info3_param *ref)
{
cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
ref->flags, ref->server_type);
cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
ref->ref_flag, ref->path_consumed);
}
/*
* Create a vfsmount that we can automount
*/
static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
{
struct dfs_info3_param referral = {0};
struct cifs_sb_info *cifs_sb;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
char *full_path, *root_path;
unsigned int xid;
int len;
int rc;
struct vfsmount *mnt;
......@@ -357,7 +356,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
if (!rc) {
rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
cifs_remap(cifs_sb), full_path + 1,
&referral, NULL);
NULL, NULL);
}
free_xid(xid);
......@@ -366,26 +365,16 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
mnt = ERR_PTR(rc);
goto free_root_path;
}
dump_referral(&referral);
len = strlen(referral.node_name);
if (len < 2) {
cifs_dbg(VFS, "%s: Net Address path too short: %s\n",
__func__, referral.node_name);
mnt = ERR_PTR(-EINVAL);
goto free_dfs_ref;
}
/*
* cifs_mount() will retry every available node server in case
* of failures.
* OK - we were able to get and cache a referral for @full_path.
*
* Now, pass it down to cifs_mount() and it will retry every available
* node server in case of failures - no need to do it here.
*/
mnt = cifs_dfs_do_refmount(mntpt, cifs_sb, full_path, &referral);
cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n", __func__,
referral.node_name, mnt);
mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__,
full_path + 1, mnt);
free_dfs_ref:
free_dfs_info_param(&referral);
free_root_path:
kfree(root_path);
free_full_path:
......
......@@ -802,6 +802,26 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
return;
}
unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
{
int i;
unsigned int ace_size = 20;
pntace->type = ACCESS_ALLOWED_ACE_TYPE;
pntace->flags = 0x0;
pntace->access_req = cpu_to_le32(GENERIC_ALL);
pntace->sid.num_subauth = 1;
pntace->sid.revision = 1;
for (i = 0; i < NUM_AUTHS; i++)
pntace->sid.authority[i] = sid_authusers.authority[i];
pntace->sid.sub_auth[0] = sid_authusers.sub_auth[0];
/* size = type(1) + flags(1) + size(2) + access_req(4) + revision(1) +
 * num_subauth(1) + authority(6) + sub_auth(4 * num_subauth); 16 + 4*1 = 20 */
pntace->size = cpu_to_le16(ace_size);
return ace_size;
}
/*
* Fill in the special SID based on the mode. See
* http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
......
......@@ -149,6 +149,9 @@ extern ssize_t cifs_file_copychunk_range(unsigned int xid,
size_t len, unsigned int flags);
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern void cifs_setsize(struct inode *inode, loff_t offset);
extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
#ifdef CONFIG_CIFS_NFSD_EXPORT
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
......
......@@ -1588,6 +1588,7 @@ struct mid_q_entry {
mid_callback_t *callback; /* call completion callback */
mid_handle_t *handle; /* call handle mid callback */
void *callback_data; /* general purpose pointer for callback */
struct task_struct *creator;
void *resp_buf; /* pointer to received SMB header */
unsigned int resp_buf_size;
int mid_state; /* wish this were enum but can not pass to wait_event */
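
The new creator field backs the shortlog fix "CIFS: Fix task struct use-after-free on reconnect": the mid records the task that issued it so the wake-up on completion cannot touch a task_struct that has already exited. A sketch of the expected pairing; the allocation and release sites are outside this excerpt and the variable names are assumed:

    /* at mid allocation */
    get_task_struct(current);           /* pin the task... */
    temp->creator = current;            /* ...before publishing the pointer */

    /* when the mid is finally released */
    put_task_struct(midEntry->creator);
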
......
......@@ -213,6 +213,7 @@ extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
const struct cifs_fid *, u32 *);
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
const char *, int);
extern unsigned int setup_authusers_ACE(struct cifs_ace *pace);
extern unsigned int setup_special_mode_ACE(struct cifs_ace *pace, __u64 nmode);
extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
......@@ -596,6 +597,9 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
int copy_path_name(char *dst, const char *src);
int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
int resp_buftype,
struct cifs_search_info *srch_inf);
#ifdef CONFIG_CIFS_DFS_UPCALL
static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
......
......@@ -4619,7 +4619,7 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
psrch_inf->unicode = false;
psrch_inf->ntwrk_buf_start = (char *)pSMBr;
psrch_inf->smallBuf = 0;
psrch_inf->smallBuf = false;
psrch_inf->srch_entries_start =
(char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
......@@ -4753,7 +4753,7 @@ int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
cifs_buf_release(psrch_inf->ntwrk_buf_start);
psrch_inf->srch_entries_start = response_data;
psrch_inf->ntwrk_buf_start = (char *)pSMB;
psrch_inf->smallBuf = 0;
psrch_inf->smallBuf = false;
if (parms->EndofSearch)
psrch_inf->endOfSearch = true;
else
......
......@@ -3709,8 +3709,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
old->prepath;
bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
new->prepath;
if (old_set && new_set && !strcmp(new->prepath, old->prepath))
return 1;
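
The added "&& old->prepath" / "&& new->prepath" conditions are the NULL-dereference fix from the shortlog: CIFS_MOUNT_USE_PREFIX_PATH can evidently be set while the prepath string itself is NULL, in which case the strcmp() here would previously have dereferenced NULL.
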
......
......@@ -5,8 +5,6 @@
* Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
*/
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
......@@ -22,67 +20,68 @@
#include "dfs_cache.h"
#define DFS_CACHE_HTABLE_SIZE 32
#define DFS_CACHE_MAX_ENTRIES 64
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
DFSREF_STORAGE_SERVER))
struct dfs_cache_tgt {
char *t_name;
struct list_head t_list;
struct cache_dfs_tgt {
char *name;
struct list_head list;
};
struct dfs_cache_entry {
struct hlist_node ce_hlist;
const char *ce_path;
int ce_ttl;
int ce_srvtype;
int ce_flags;
struct timespec64 ce_etime;
int ce_path_consumed;
int ce_numtgts;
struct list_head ce_tlist;
struct dfs_cache_tgt *ce_tgthint;
struct rcu_head ce_rcu;
struct cache_entry {
struct hlist_node hlist;
const char *path;
int ttl;
int srvtype;
int flags;
struct timespec64 etime;
int path_consumed;
int numtgts;
struct list_head tlist;
struct cache_dfs_tgt *tgthint;
};
static struct kmem_cache *dfs_cache_slab __read_mostly;
struct dfs_cache_vol_info {
char *vi_fullpath;
struct smb_vol vi_vol;
char *vi_mntdata;
struct list_head vi_list;
struct vol_info {
char *fullpath;
spinlock_t smb_vol_lock;
struct smb_vol smb_vol;
char *mntdata;
struct list_head list;
struct list_head rlist;
struct kref refcnt;
};
struct dfs_cache {
struct mutex dc_lock;
struct nls_table *dc_nlsc;
struct list_head dc_vol_list;
int dc_ttl;
struct delayed_work dc_refresh;
};
static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;
static struct dfs_cache dfs_cache;
static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);
static struct nls_table *cache_nlsc;
/*
* Number of entries in the cache
*/
static size_t dfs_cache_count;
static atomic_t cache_count;
static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
static DEFINE_MUTEX(dfs_cache_list_lock);
static struct hlist_head dfs_cache_htable[DFS_CACHE_HTABLE_SIZE];
static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);
static void refresh_cache_worker(struct work_struct *work);
static inline bool is_path_valid(const char *path)
{
return path && (strchr(path + 1, '\\') || strchr(path + 1, '/'));
}
static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
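
The renames above come with a locking rework: the single dfs_cache_list_lock mutex and RCU-freed entries give way to one rwsem over the hash table plus small spinlocks for the TTL and the volume list. A minimal sketch of the intended pattern, composed from call sites later in this diff rather than quoted verbatim:

    /* lookups may run concurrently */
    down_read(&htable_rw_lock);
    ce = lookup_cache_entry(npath, NULL);
    if (!IS_ERR(ce))
            rc = setup_referral(path, ce, ref, get_tgt_name(ce));
    up_read(&htable_rw_lock);

    /* insertion, eviction and target-hint updates are exclusive */
    down_write(&htable_rw_lock);
    hlist_add_head(&ce->hlist, &cache_htable[hash]);
    up_write(&htable_rw_lock);
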
static inline int get_normalized_path(const char *path, char **npath)
static int get_normalized_path(const char *path, char **npath)
{
if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
return -EINVAL;
if (*path == '\\') {
*npath = (char *)path;
} else {
......@@ -100,57 +99,48 @@ static inline void free_normalized_path(const char *path, char *npath)
kfree(npath);
}
static inline bool cache_entry_expired(const struct dfs_cache_entry *ce)
static inline bool cache_entry_expired(const struct cache_entry *ce)
{
struct timespec64 ts;
ktime_get_coarse_real_ts64(&ts);
return timespec64_compare(&ts, &ce->ce_etime) >= 0;
return timespec64_compare(&ts, &ce->etime) >= 0;
}
static inline void free_tgts(struct dfs_cache_entry *ce)
static inline void free_tgts(struct cache_entry *ce)
{
struct dfs_cache_tgt *t, *n;
struct cache_dfs_tgt *t, *n;
list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) {
list_del(&t->t_list);
kfree(t->t_name);
list_for_each_entry_safe(t, n, &ce->tlist, list) {
list_del(&t->list);
kfree(t->name);
kfree(t);
}
}
static void free_cache_entry(struct rcu_head *rcu)
static inline void flush_cache_ent(struct cache_entry *ce)
{
struct dfs_cache_entry *ce = container_of(rcu, struct dfs_cache_entry,
ce_rcu);
kmem_cache_free(dfs_cache_slab, ce);
}
static inline void flush_cache_ent(struct dfs_cache_entry *ce)
{
if (hlist_unhashed(&ce->ce_hlist))
return;
hlist_del_init_rcu(&ce->ce_hlist);
kfree_const(ce->ce_path);
hlist_del_init(&ce->hlist);
kfree(ce->path);
free_tgts(ce);
dfs_cache_count--;
call_rcu(&ce->ce_rcu, free_cache_entry);
atomic_dec(&cache_count);
kmem_cache_free(cache_slab, ce);
}
static void flush_cache_ents(void)
{
int i;
rcu_read_lock();
for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &dfs_cache_htable[i];
struct dfs_cache_entry *ce;
for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i];
struct hlist_node *n;
struct cache_entry *ce;
hlist_for_each_entry_rcu(ce, l, ce_hlist)
flush_cache_ent(ce);
hlist_for_each_entry_safe(ce, n, l, hlist) {
if (!hlist_unhashed(&ce->hlist))
flush_cache_ent(ce);
}
}
rcu_read_unlock();
}
/*
......@@ -158,36 +148,39 @@ static void flush_cache_ents(void)
*/
static int dfscache_proc_show(struct seq_file *m, void *v)
{
int bucket;
struct dfs_cache_entry *ce;
struct dfs_cache_tgt *t;
int i;
struct cache_entry *ce;
struct cache_dfs_tgt *t;
seq_puts(m, "DFS cache\n---------\n");
mutex_lock(&dfs_cache_list_lock);
rcu_read_lock();
hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
seq_printf(m,
"cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
"interlink=%s,path_consumed=%d,expired=%s\n",
ce->ce_path,
ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link",
ce->ce_ttl, ce->ce_etime.tv_nsec,
IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
ce->ce_path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
list_for_each_entry(t, &ce->ce_tlist, t_list) {
seq_printf(m, " %s%s\n",
t->t_name,
ce->ce_tgthint == t ? " (target hint)" : "");
down_read(&htable_rw_lock);
for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i];
hlist_for_each_entry(ce, l, hlist) {
if (hlist_unhashed(&ce->hlist))
continue;
seq_printf(m,
"cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
"interlink=%s,path_consumed=%d,expired=%s\n",
ce->path,
ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
ce->ttl, ce->etime.tv_nsec,
IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
ce->path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
list_for_each_entry(t, &ce->tlist, list) {
seq_printf(m, " %s%s\n",
t->name,
ce->tgthint == t ? " (target hint)" : "");
}
}
}
rcu_read_unlock();
up_read(&htable_rw_lock);
mutex_unlock(&dfs_cache_list_lock);
return 0;
}
......@@ -205,9 +198,10 @@ static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
return -EINVAL;
cifs_dbg(FYI, "clearing dfs cache");
mutex_lock(&dfs_cache_list_lock);
down_write(&htable_rw_lock);
flush_cache_ents();
mutex_unlock(&dfs_cache_list_lock);
up_write(&htable_rw_lock);
return count;
}
......@@ -226,25 +220,25 @@ const struct file_operations dfscache_proc_fops = {
};
#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct dfs_cache_entry *ce)
static inline void dump_tgts(const struct cache_entry *ce)
{
struct dfs_cache_tgt *t;
struct cache_dfs_tgt *t;
cifs_dbg(FYI, "target list:\n");
list_for_each_entry(t, &ce->ce_tlist, t_list) {
cifs_dbg(FYI, " %s%s\n", t->t_name,
ce->ce_tgthint == t ? " (target hint)" : "");
list_for_each_entry(t, &ce->tlist, list) {
cifs_dbg(FYI, " %s%s\n", t->name,
ce->tgthint == t ? " (target hint)" : "");
}
}
static inline void dump_ce(const struct dfs_cache_entry *ce)
static inline void dump_ce(const struct cache_entry *ce)
{
cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
"interlink=%s,path_consumed=%d,expired=%s\n", ce->ce_path,
ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ce_ttl,
ce->ce_etime.tv_nsec,
IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
ce->ce_path_consumed,
"interlink=%s,path_consumed=%d,expired=%s\n", ce->path,
ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
ce->etime.tv_nsec,
IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
ce->path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
dump_tgts(ce);
}
......@@ -284,25 +278,34 @@ static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
*/
int dfs_cache_init(void)
{
int rc;
int i;
dfs_cache_slab = kmem_cache_create("cifs_dfs_cache",
sizeof(struct dfs_cache_entry), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!dfs_cache_slab)
dfscache_wq = alloc_workqueue("cifs-dfscache",
WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
if (!dfscache_wq)
return -ENOMEM;
for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++)
INIT_HLIST_HEAD(&dfs_cache_htable[i]);
cache_slab = kmem_cache_create("cifs_dfs_cache",
sizeof(struct cache_entry), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!cache_slab) {
rc = -ENOMEM;
goto out_destroy_wq;
}
for (i = 0; i < CACHE_HTABLE_SIZE; i++)
INIT_HLIST_HEAD(&cache_htable[i]);
INIT_LIST_HEAD(&dfs_cache.dc_vol_list);
mutex_init(&dfs_cache.dc_lock);
INIT_DELAYED_WORK(&dfs_cache.dc_refresh, refresh_cache_worker);
dfs_cache.dc_ttl = -1;
dfs_cache.dc_nlsc = load_nls_default();
atomic_set(&cache_count, 0);
cache_nlsc = load_nls_default();
cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
return 0;
out_destroy_wq:
destroy_workqueue(dfscache_wq);
return rc;
}
static inline unsigned int cache_entry_hash(const void *data, int size)
......@@ -310,7 +313,7 @@ static inline unsigned int cache_entry_hash(const void *data, int size)
unsigned int h;
h = jhash(data, size, 0);
return h & (DFS_CACHE_HTABLE_SIZE - 1);
return h & (CACHE_HTABLE_SIZE - 1);
}
/* Check whether second path component of @path is SYSVOL or NETLOGON */
......@@ -325,11 +328,11 @@ static inline bool is_sysvol_or_netlogon(const char *path)
}
/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct dfs_cache_entry *ce)
static inline char *get_tgt_name(const struct cache_entry *ce)
{
struct dfs_cache_tgt *t = ce->ce_tgthint;
struct cache_dfs_tgt *t = ce->tgthint;
return t ? t->t_name : ERR_PTR(-ENOENT);
return t ? t->name : ERR_PTR(-ENOENT);
}
/* Return expire time out of a new entry's TTL */
......@@ -346,19 +349,19 @@ static inline struct timespec64 get_expire_time(int ttl)
}
/* Allocate a new DFS target */
static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
static struct cache_dfs_tgt *alloc_target(const char *name)
{
struct dfs_cache_tgt *t;
struct cache_dfs_tgt *t;
t = kmalloc(sizeof(*t), GFP_KERNEL);
t = kmalloc(sizeof(*t), GFP_ATOMIC);
if (!t)
return ERR_PTR(-ENOMEM);
t->t_name = kstrndup(name, strlen(name), GFP_KERNEL);
if (!t->t_name) {
t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
if (!t->name) {
kfree(t);
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&t->t_list);
INIT_LIST_HEAD(&t->list);
return t;
}
......@@ -367,180 +370,184 @@ static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
* target hint.
*/
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
struct dfs_cache_entry *ce, const char *tgthint)
struct cache_entry *ce, const char *tgthint)
{
int i;
ce->ce_ttl = refs[0].ttl;
ce->ce_etime = get_expire_time(ce->ce_ttl);
ce->ce_srvtype = refs[0].server_type;
ce->ce_flags = refs[0].ref_flag;
ce->ce_path_consumed = refs[0].path_consumed;
ce->ttl = refs[0].ttl;
ce->etime = get_expire_time(ce->ttl);
ce->srvtype = refs[0].server_type;
ce->flags = refs[0].ref_flag;
ce->path_consumed = refs[0].path_consumed;
for (i = 0; i < numrefs; i++) {
struct dfs_cache_tgt *t;
struct cache_dfs_tgt *t;
t = alloc_tgt(refs[i].node_name);
t = alloc_target(refs[i].node_name);
if (IS_ERR(t)) {
free_tgts(ce);
return PTR_ERR(t);
}
if (tgthint && !strcasecmp(t->t_name, tgthint)) {
list_add(&t->t_list, &ce->ce_tlist);
if (tgthint && !strcasecmp(t->name, tgthint)) {
list_add(&t->list, &ce->tlist);
tgthint = NULL;
} else {
list_add_tail(&t->t_list, &ce->ce_tlist);
list_add_tail(&t->list, &ce->tlist);
}
ce->ce_numtgts++;
ce->numtgts++;
}
ce->ce_tgthint = list_first_entry_or_null(&ce->ce_tlist,
struct dfs_cache_tgt, t_list);
ce->tgthint = list_first_entry_or_null(&ce->tlist,
struct cache_dfs_tgt, list);
return 0;
}
/* Allocate a new cache entry */
static struct dfs_cache_entry *
alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
int numrefs)
static struct cache_entry *alloc_cache_entry(const char *path,
const struct dfs_info3_param *refs,
int numrefs)
{
struct dfs_cache_entry *ce;
struct cache_entry *ce;
int rc;
ce = kmem_cache_zalloc(dfs_cache_slab, GFP_KERNEL);
ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
if (!ce)
return ERR_PTR(-ENOMEM);
ce->ce_path = kstrdup_const(path, GFP_KERNEL);
if (!ce->ce_path) {
kmem_cache_free(dfs_cache_slab, ce);
ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
if (!ce->path) {
kmem_cache_free(cache_slab, ce);
return ERR_PTR(-ENOMEM);
}
INIT_HLIST_NODE(&ce->ce_hlist);
INIT_LIST_HEAD(&ce->ce_tlist);
INIT_HLIST_NODE(&ce->hlist);
INIT_LIST_HEAD(&ce->tlist);
rc = copy_ref_data(refs, numrefs, ce, NULL);
if (rc) {
kfree_const(ce->ce_path);
kmem_cache_free(dfs_cache_slab, ce);
kfree(ce->path);
kmem_cache_free(cache_slab, ce);
ce = ERR_PTR(rc);
}
return ce;
}
/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
int bucket;
struct dfs_cache_entry *ce;
struct dfs_cache_entry *to_del = NULL;
rcu_read_lock();
hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
if (!to_del || timespec64_compare(&ce->ce_etime,
&to_del->ce_etime) < 0)
to_del = ce;
int i;
struct cache_entry *ce;
struct cache_entry *to_del = NULL;
for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i];
hlist_for_each_entry(ce, l, hlist) {
if (hlist_unhashed(&ce->hlist))
continue;
if (!to_del || timespec64_compare(&ce->etime,
&to_del->etime) < 0)
to_del = ce;
}
}
if (!to_del) {
cifs_dbg(FYI, "%s: no entry to remove", __func__);
goto out;
return;
}
cifs_dbg(FYI, "%s: removing entry", __func__);
dump_ce(to_del);
flush_cache_ent(to_del);
out:
rcu_read_unlock();
}
/* Add a new DFS cache entry */
static inline struct dfs_cache_entry *
add_cache_entry(unsigned int hash, const char *path,
const struct dfs_info3_param *refs, int numrefs)
static int add_cache_entry(const char *path, unsigned int hash,
struct dfs_info3_param *refs, int numrefs)
{
struct dfs_cache_entry *ce;
struct cache_entry *ce;
ce = alloc_cache_entry(path, refs, numrefs);
if (IS_ERR(ce))
return ce;
return PTR_ERR(ce);
hlist_add_head_rcu(&ce->ce_hlist, &dfs_cache_htable[hash]);
mutex_lock(&dfs_cache.dc_lock);
if (dfs_cache.dc_ttl < 0) {
dfs_cache.dc_ttl = ce->ce_ttl;
queue_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
dfs_cache.dc_ttl * HZ);
spin_lock(&cache_ttl_lock);
if (!cache_ttl) {
cache_ttl = ce->ttl;
queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
} else {
dfs_cache.dc_ttl = min_t(int, dfs_cache.dc_ttl, ce->ce_ttl);
mod_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
dfs_cache.dc_ttl * HZ);
cache_ttl = min_t(int, cache_ttl, ce->ttl);
mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
}
mutex_unlock(&dfs_cache.dc_lock);
spin_unlock(&cache_ttl_lock);
return ce;
down_write(&htable_rw_lock);
hlist_add_head(&ce->hlist, &cache_htable[hash]);
dump_ce(ce);
up_write(&htable_rw_lock);
return 0;
}
static struct dfs_cache_entry *__find_cache_entry(unsigned int hash,
const char *path)
/*
* Find a DFS cache entry in hash table and optionally check prefix path against
* @path.
* Use whole path components in the match.
* Must be called with htable_rw_lock held.
*
* Return ERR_PTR(-ENOENT) if the entry is not found.
*/
static struct cache_entry *lookup_cache_entry(const char *path,
unsigned int *hash)
{
struct dfs_cache_entry *ce;
struct cache_entry *ce;
unsigned int h;
bool found = false;
rcu_read_lock();
hlist_for_each_entry_rcu(ce, &dfs_cache_htable[hash], ce_hlist) {
if (!strcasecmp(path, ce->ce_path)) {
#ifdef CONFIG_CIFS_DEBUG2
char *name = get_tgt_name(ce);
h = cache_entry_hash(path, strlen(path));
if (IS_ERR(name)) {
rcu_read_unlock();
return ERR_CAST(name);
}
cifs_dbg(FYI, "%s: cache hit\n", __func__);
cifs_dbg(FYI, "%s: target hint: %s\n", __func__, name);
#endif
hlist_for_each_entry(ce, &cache_htable[h], hlist) {
if (!strcasecmp(path, ce->path)) {
found = true;
dump_ce(ce);
break;
}
}
rcu_read_unlock();
return found ? ce : ERR_PTR(-ENOENT);
}
/*
* Find a DFS cache entry in hash table and optionally check prefix path against
* @path.
* Use whole path components in the match.
* Return ERR_PTR(-ENOENT) if the entry is not found.
*/
static inline struct dfs_cache_entry *find_cache_entry(const char *path,
unsigned int *hash)
{
*hash = cache_entry_hash(path, strlen(path));
return __find_cache_entry(*hash, path);
if (!found)
ce = ERR_PTR(-ENOENT);
if (hash)
*hash = h;
return ce;
}
static inline void destroy_slab_cache(void)
static void __vol_release(struct vol_info *vi)
{
rcu_barrier();
kmem_cache_destroy(dfs_cache_slab);
kfree(vi->fullpath);
kfree(vi->mntdata);
cifs_cleanup_volume_info_contents(&vi->smb_vol);
kfree(vi);
}
static inline void free_vol(struct dfs_cache_vol_info *vi)
static void vol_release(struct kref *kref)
{
list_del(&vi->vi_list);
kfree(vi->vi_fullpath);
kfree(vi->vi_mntdata);
cifs_cleanup_volume_info_contents(&vi->vi_vol);
kfree(vi);
struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
spin_lock(&vol_list_lock);
list_del(&vi->list);
spin_unlock(&vol_list_lock);
__vol_release(vi);
}
static inline void free_vol_list(void)
{
struct dfs_cache_vol_info *vi, *nvi;
struct vol_info *vi, *nvi;
list_for_each_entry_safe(vi, nvi, &dfs_cache.dc_vol_list, vi_list)
free_vol(vi);
list_for_each_entry_safe(vi, nvi, &vol_list, list) {
list_del_init(&vi->list);
__vol_release(vi);
}
}
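
Volume entries are now reference-counted: vol_list_lock guards only list membership, while the kref keeps a vol_info alive once a caller has left the lock. A sketch of the lookup-and-pin pattern that dfs_cache_update_vol() uses further down:

    spin_lock(&vol_list_lock);
    vi = find_vol(fullpath);
    if (!IS_ERR(vi))
            kref_get(&vi->refcnt);          /* pin before dropping the lock */
    spin_unlock(&vol_list_lock);

    /* ... work on vi->smb_vol under vi->smb_vol_lock, then ... */
    kref_put(&vi->refcnt, vol_release);     /* vol_release unlinks and frees */
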
/**
......@@ -548,83 +555,78 @@ static inline void free_vol_list(void)
*/
void dfs_cache_destroy(void)
{
cancel_delayed_work_sync(&dfs_cache.dc_refresh);
unload_nls(dfs_cache.dc_nlsc);
cancel_delayed_work_sync(&refresh_task);
unload_nls(cache_nlsc);
free_vol_list();
mutex_destroy(&dfs_cache.dc_lock);
flush_cache_ents();
destroy_slab_cache();
mutex_destroy(&dfs_cache_list_lock);
kmem_cache_destroy(cache_slab);
destroy_workqueue(dfscache_wq);
cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
static inline struct dfs_cache_entry *
__update_cache_entry(const char *path, const struct dfs_info3_param *refs,
int numrefs)
/* Must be called with htable_rw_lock held */
static int __update_cache_entry(const char *path,
const struct dfs_info3_param *refs,
int numrefs)
{
int rc;
unsigned int h;
struct dfs_cache_entry *ce;
struct cache_entry *ce;
char *s, *th = NULL;
ce = find_cache_entry(path, &h);
ce = lookup_cache_entry(path, NULL);
if (IS_ERR(ce))
return ce;
return PTR_ERR(ce);
if (ce->ce_tgthint) {
s = ce->ce_tgthint->t_name;
th = kstrndup(s, strlen(s), GFP_KERNEL);
if (ce->tgthint) {
s = ce->tgthint->name;
th = kstrndup(s, strlen(s), GFP_ATOMIC);
if (!th)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
}
free_tgts(ce);
ce->ce_numtgts = 0;
ce->numtgts = 0;
rc = copy_ref_data(refs, numrefs, ce, th);
kfree(th);
if (rc)
ce = ERR_PTR(rc);
kfree(th);
return ce;
return rc;
}
/* Update an expired cache entry by getting a new DFS referral from server */
static struct dfs_cache_entry *
update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_codepage, int remap,
const char *path, struct dfs_cache_entry *ce)
static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_codepage, int remap,
const char *path, struct dfs_info3_param **refs,
int *numrefs)
{
int rc;
struct dfs_info3_param *refs = NULL;
int numrefs = 0;
cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
cifs_dbg(FYI, "%s: update expired cache entry\n", __func__);
/*
* Check if caller provided enough parameters to update an expired
* entry.
*/
if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
return ERR_PTR(-ETIME);
return -EOPNOTSUPP;
if (unlikely(!nls_codepage))
return ERR_PTR(-ETIME);
return -EINVAL;
cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__, path);
*refs = NULL;
*numrefs = 0;
rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs, &numrefs,
nls_codepage, remap);
if (rc)
ce = ERR_PTR(rc);
else
ce = __update_cache_entry(path, refs, numrefs);
return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
nls_codepage, remap);
}
dump_refs(refs, numrefs);
free_dfs_info_array(refs, numrefs);
/* Update an expired cache entry by getting a new DFS referral from server */
static int update_cache_entry(const char *path,
const struct dfs_info3_param *refs,
int numrefs)
{
return ce;
int rc;
down_write(&htable_rw_lock);
rc = __update_cache_entry(path, refs, numrefs);
up_write(&htable_rw_lock);
return rc;
}
/*
......@@ -636,95 +638,86 @@ update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
* For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
* handle them properly.
*/
static struct dfs_cache_entry *
do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_codepage, int remap,
const char *path, bool noreq)
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_codepage, int remap,
const char *path, bool noreq)
{
int rc;
unsigned int h;
struct dfs_cache_entry *ce;
struct dfs_info3_param *nrefs;
int numnrefs;
unsigned int hash;
struct cache_entry *ce;
struct dfs_info3_param *refs = NULL;
int numrefs = 0;
bool newent = false;
cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
ce = find_cache_entry(path, &h);
if (IS_ERR(ce)) {
cifs_dbg(FYI, "%s: cache miss\n", __func__);
/*
* If @noreq is set, no requests will be sent to the server for
* either updating or getting a new DFS referral.
*/
if (noreq)
return ce;
/*
* No cache entry was found, so check for valid parameters that
* will be required to get a new DFS referral and then create a
* new cache entry.
*/
if (!ses || !ses->server || !ses->server->ops->get_dfs_refer) {
ce = ERR_PTR(-EOPNOTSUPP);
return ce;
}
if (unlikely(!nls_codepage)) {
ce = ERR_PTR(-EINVAL);
return ce;
}
down_read(&htable_rw_lock);
nrefs = NULL;
numnrefs = 0;
ce = lookup_cache_entry(path, &hash);
cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__,
path);
/*
* If @noreq is set, no requests will be sent to the server. Just return
* the cache entry.
*/
if (noreq) {
up_read(&htable_rw_lock);
return PTR_ERR_OR_ZERO(ce);
}
rc = ses->server->ops->get_dfs_refer(xid, ses, path, &nrefs,
&numnrefs, nls_codepage,
remap);
if (rc) {
ce = ERR_PTR(rc);
return ce;
if (!IS_ERR(ce)) {
if (!cache_entry_expired(ce)) {
dump_ce(ce);
up_read(&htable_rw_lock);
return 0;
}
} else {
newent = true;
}
dump_refs(nrefs, numnrefs);
up_read(&htable_rw_lock);
cifs_dbg(FYI, "%s: new cache entry\n", __func__);
/*
* No entry was found.
*
* Request a new DFS referral in order to create a new cache entry, or
* updating an existing one.
*/
rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
&refs, &numrefs);
if (rc)
return rc;
if (dfs_cache_count >= DFS_CACHE_MAX_ENTRIES) {
cifs_dbg(FYI, "%s: reached max cache size (%d)",
__func__, DFS_CACHE_MAX_ENTRIES);
remove_oldest_entry();
}
ce = add_cache_entry(h, path, nrefs, numnrefs);
free_dfs_info_array(nrefs, numnrefs);
dump_refs(refs, numrefs);
if (IS_ERR(ce))
return ce;
if (!newent) {
rc = update_cache_entry(path, refs, numrefs);
goto out_free_refs;
}
dfs_cache_count++;
if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
cifs_dbg(FYI, "%s: reached max cache size (%d)", __func__,
CACHE_MAX_ENTRIES);
down_write(&htable_rw_lock);
remove_oldest_entry();
up_write(&htable_rw_lock);
}
dump_ce(ce);
rc = add_cache_entry(path, hash, refs, numrefs);
if (!rc)
atomic_inc(&cache_count);
/* Just return the found cache entry in case @noreq is set */
if (noreq)
return ce;
if (cache_entry_expired(ce)) {
cifs_dbg(FYI, "%s: expired cache entry\n", __func__);
ce = update_cache_entry(xid, ses, nls_codepage, remap, path,
ce);
if (IS_ERR(ce)) {
cifs_dbg(FYI, "%s: failed to update expired entry\n",
__func__);
}
}
return ce;
out_free_refs:
free_dfs_info_array(refs, numrefs);
return rc;
}
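
Read as a whole, the rewritten __dfs_cache_find() looks the path up under the read lock; a fresh hit returns immediately, while a miss or an expired hit drops the lock, fetches a new referral over the wire, and then either updates the existing entry or adds a new one, evicting the oldest entry under the write lock once CACHE_MAX_ENTRIES is reached.
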
/* Set up a new DFS referral from a given cache entry */
static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
struct dfs_info3_param *ref, const char *tgt)
/*
* Set up a DFS referral from a given cache entry.
*
* Must be called with htable_rw_lock held.
*/
static int setup_referral(const char *path, struct cache_entry *ce,
struct dfs_info3_param *ref, const char *target)
{
int rc;
......@@ -732,21 +725,20 @@ static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
memset(ref, 0, sizeof(*ref));
ref->path_name = kstrndup(path, strlen(path), GFP_KERNEL);
ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
if (!ref->path_name)
return -ENOMEM;
ref->path_consumed = ce->ce_path_consumed;
ref->node_name = kstrndup(tgt, strlen(tgt), GFP_KERNEL);
ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
if (!ref->node_name) {
rc = -ENOMEM;
goto err_free_path;
}
ref->ttl = ce->ce_ttl;
ref->server_type = ce->ce_srvtype;
ref->ref_flag = ce->ce_flags;
ref->path_consumed = ce->path_consumed;
ref->ttl = ce->ttl;
ref->server_type = ce->srvtype;
ref->ref_flag = ce->flags;
return 0;
......@@ -757,38 +749,37 @@ static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
}
/* Return target list of a DFS cache entry */
static int get_tgt_list(const struct dfs_cache_entry *ce,
struct dfs_cache_tgt_list *tl)
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
int rc;
struct list_head *head = &tl->tl_list;
struct dfs_cache_tgt *t;
struct cache_dfs_tgt *t;
struct dfs_cache_tgt_iterator *it, *nit;
memset(tl, 0, sizeof(*tl));
INIT_LIST_HEAD(head);
list_for_each_entry(t, &ce->ce_tlist, t_list) {
it = kzalloc(sizeof(*it), GFP_KERNEL);
list_for_each_entry(t, &ce->tlist, list) {
it = kzalloc(sizeof(*it), GFP_ATOMIC);
if (!it) {
rc = -ENOMEM;
goto err_free_it;
}
it->it_name = kstrndup(t->t_name, strlen(t->t_name),
GFP_KERNEL);
it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
if (!it->it_name) {
kfree(it);
rc = -ENOMEM;
goto err_free_it;
}
if (ce->ce_tgthint == t)
if (ce->tgthint == t)
list_add(&it->it_list, head);
else
list_add_tail(&it->it_list, head);
}
tl->tl_numtgts = ce->ce_numtgts;
tl->tl_numtgts = ce->numtgts;
return 0;
......@@ -829,28 +820,35 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
{
int rc;
char *npath;
struct dfs_cache_entry *ce;
if (unlikely(!is_path_valid(path)))
return -EINVAL;
struct cache_entry *ce;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
mutex_lock(&dfs_cache_list_lock);
ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
if (!IS_ERR(ce)) {
if (ref)
rc = setup_ref(path, ce, ref, get_tgt_name(ce));
else
rc = 0;
if (!rc && tgt_list)
rc = get_tgt_list(ce, tgt_list);
} else {
rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
if (rc)
goto out_free_path;
down_read(&htable_rw_lock);
ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
up_read(&htable_rw_lock);
rc = PTR_ERR(ce);
goto out_free_path;
}
mutex_unlock(&dfs_cache_list_lock);
if (ref)
rc = setup_referral(path, ce, ref, get_tgt_name(ce));
else
rc = 0;
if (!rc && tgt_list)
rc = get_targets(ce, tgt_list);
up_read(&htable_rw_lock);
out_free_path:
free_normalized_path(path, npath);
return rc;
}
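
A sketch of how a caller consumes this API together with the target-list helpers declared in dfs_cache.h (helper names as declared there; surrounding variables assumed, error handling trimmed):

    struct dfs_cache_tgt_list tl;
    struct dfs_cache_tgt_iterator *it;
    int rc;

    rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
                        path, NULL, &tl);
    if (rc)
            return rc;

    for (it = dfs_cache_get_tgt_iterator(&tl); it;
         it = dfs_cache_get_next_tgt(&tl, it)) {
            /* try dfs_cache_get_tgt_name(it) as the next target */
    }
    dfs_cache_free_tgts(&tl);
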
......@@ -876,31 +874,33 @@ int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
{
int rc;
char *npath;
struct dfs_cache_entry *ce;
if (unlikely(!is_path_valid(path)))
return -EINVAL;
struct cache_entry *ce;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
mutex_lock(&dfs_cache_list_lock);
ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
down_read(&htable_rw_lock);
ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out;
goto out_unlock;
}
if (ref)
rc = setup_ref(path, ce, ref, get_tgt_name(ce));
rc = setup_referral(path, ce, ref, get_tgt_name(ce));
else
rc = 0;
if (!rc && tgt_list)
rc = get_tgt_list(ce, tgt_list);
out:
mutex_unlock(&dfs_cache_list_lock);
rc = get_targets(ce, tgt_list);
out_unlock:
up_read(&htable_rw_lock);
free_normalized_path(path, npath);
return rc;
}
......@@ -929,44 +929,46 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
{
int rc;
char *npath;
struct dfs_cache_entry *ce;
struct dfs_cache_tgt *t;
if (unlikely(!is_path_valid(path)))
return -EINVAL;
struct cache_entry *ce;
struct cache_dfs_tgt *t;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
mutex_lock(&dfs_cache_list_lock);
ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
if (rc)
goto out_free_path;
down_write(&htable_rw_lock);
ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out;
goto out_unlock;
}
rc = 0;
t = ce->ce_tgthint;
t = ce->tgthint;
if (likely(!strcasecmp(it->it_name, t->t_name)))
goto out;
if (likely(!strcasecmp(it->it_name, t->name)))
goto out_unlock;
list_for_each_entry(t, &ce->ce_tlist, t_list) {
if (!strcasecmp(t->t_name, it->it_name)) {
ce->ce_tgthint = t;
list_for_each_entry(t, &ce->tlist, list) {
if (!strcasecmp(t->name, it->it_name)) {
ce->tgthint = t;
cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
it->it_name);
break;
}
}
out:
mutex_unlock(&dfs_cache_list_lock);
out_unlock:
up_write(&htable_rw_lock);
out_free_path:
free_normalized_path(path, npath);
return rc;
}
......@@ -989,10 +991,10 @@ int dfs_cache_noreq_update_tgthint(const char *path,
{
int rc;
char *npath;
struct dfs_cache_entry *ce;
struct dfs_cache_tgt *t;
struct cache_entry *ce;
struct cache_dfs_tgt *t;
if (unlikely(!is_path_valid(path)) || !it)
if (!it)
return -EINVAL;
rc = get_normalized_path(path, &npath);
......@@ -1001,33 +1003,33 @@ int dfs_cache_noreq_update_tgthint(const char *path,
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
mutex_lock(&dfs_cache_list_lock);
down_write(&htable_rw_lock);
ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out;
goto out_unlock;
}
rc = 0;
t = ce->tgthint;
t = ce->ce_tgthint;
if (unlikely(!strcasecmp(it->it_name, t->name)))
goto out_unlock;
if (unlikely(!strcasecmp(it->it_name, t->t_name)))
goto out;
list_for_each_entry(t, &ce->ce_tlist, t_list) {
if (!strcasecmp(t->t_name, it->it_name)) {
ce->ce_tgthint = t;
list_for_each_entry(t, &ce->tlist, list) {
if (!strcasecmp(t->name, it->it_name)) {
ce->tgthint = t;
cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
it->it_name);
break;
}
}
out:
mutex_unlock(&dfs_cache_list_lock);
out_unlock:
up_write(&htable_rw_lock);
free_normalized_path(path, npath);
return rc;
}
......@@ -1047,13 +1049,10 @@ int dfs_cache_get_tgt_referral(const char *path,
{
int rc;
char *npath;
struct dfs_cache_entry *ce;
unsigned int h;
struct cache_entry *ce;
if (!it || !ref)
return -EINVAL;
if (unlikely(!is_path_valid(path)))
return -EINVAL;
rc = get_normalized_path(path, &npath);
if (rc)
......@@ -1061,21 +1060,22 @@ int dfs_cache_get_tgt_referral(const char *path,
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
mutex_lock(&dfs_cache_list_lock);
down_read(&htable_rw_lock);
ce = find_cache_entry(npath, &h);
ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out;
goto out_unlock;
}
cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
rc = setup_ref(path, ce, ref, it->it_name);
rc = setup_referral(path, ce, ref, it->it_name);
out:
mutex_unlock(&dfs_cache_list_lock);
out_unlock:
up_read(&htable_rw_lock);
free_normalized_path(path, npath);
return rc;
}
......@@ -1085,7 +1085,7 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
if (vol->username) {
new->username = kstrndup(vol->username, strlen(vol->username),
GFP_KERNEL);
GFP_KERNEL);
if (!new->username)
return -ENOMEM;
}
......@@ -1103,7 +1103,7 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
}
if (vol->domainname) {
new->domainname = kstrndup(vol->domainname,
strlen(vol->domainname), GFP_KERNEL);
strlen(vol->domainname), GFP_KERNEL);
if (!new->domainname)
goto err_free_unc;
}
......@@ -1150,7 +1150,7 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
int rc;
struct dfs_cache_vol_info *vi;
struct vol_info *vi;
if (!vol || !fullpath || !mntdata)
return -EINVAL;
......@@ -1161,38 +1161,41 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
if (!vi)
return -ENOMEM;
vi->vi_fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
if (!vi->vi_fullpath) {
vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
if (!vi->fullpath) {
rc = -ENOMEM;
goto err_free_vi;
}
rc = dup_vol(vol, &vi->vi_vol);
rc = dup_vol(vol, &vi->smb_vol);
if (rc)
goto err_free_fullpath;
vi->vi_mntdata = mntdata;
vi->mntdata = mntdata;
spin_lock_init(&vi->smb_vol_lock);
kref_init(&vi->refcnt);
spin_lock(&vol_list_lock);
list_add_tail(&vi->list, &vol_list);
spin_unlock(&vol_list_lock);
mutex_lock(&dfs_cache.dc_lock);
list_add_tail(&vi->vi_list, &dfs_cache.dc_vol_list);
mutex_unlock(&dfs_cache.dc_lock);
return 0;
err_free_fullpath:
kfree(vi->vi_fullpath);
kfree(vi->fullpath);
err_free_vi:
kfree(vi);
return rc;
}
static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
/* Must be called with vol_list_lock held */
static struct vol_info *find_vol(const char *fullpath)
{
struct dfs_cache_vol_info *vi;
struct vol_info *vi;
list_for_each_entry(vi, &dfs_cache.dc_vol_list, vi_list) {
cifs_dbg(FYI, "%s: vi->vi_fullpath: %s\n", __func__,
vi->vi_fullpath);
if (!strcasecmp(vi->vi_fullpath, fullpath))
list_for_each_entry(vi, &vol_list, list) {
cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
if (!strcasecmp(vi->fullpath, fullpath))
return vi;
}
return ERR_PTR(-ENOENT);
......@@ -1208,30 +1211,31 @@ static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
*/
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
int rc;
struct dfs_cache_vol_info *vi;
struct vol_info *vi;
if (!fullpath || !server)
return -EINVAL;
cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
mutex_lock(&dfs_cache.dc_lock);
spin_lock(&vol_list_lock);
vi = find_vol(fullpath);
if (IS_ERR(vi)) {
rc = PTR_ERR(vi);
goto out;
spin_unlock(&vol_list_lock);
return PTR_ERR(vi);
}
kref_get(&vi->refcnt);
spin_unlock(&vol_list_lock);
cifs_dbg(FYI, "%s: updating volume info\n", __func__);
memcpy(&vi->vi_vol.dstaddr, &server->dstaddr,
sizeof(vi->vi_vol.dstaddr));
rc = 0;
spin_lock(&vi->smb_vol_lock);
memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
sizeof(vi->smb_vol.dstaddr));
spin_unlock(&vi->smb_vol_lock);
out:
mutex_unlock(&dfs_cache.dc_lock);
return rc;
kref_put(&vi->refcnt, vol_release);
return 0;
}
/**
......@@ -1241,18 +1245,18 @@ int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
*/
void dfs_cache_del_vol(const char *fullpath)
{
struct dfs_cache_vol_info *vi;
struct vol_info *vi;
if (!fullpath || !*fullpath)
return;
cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
mutex_lock(&dfs_cache.dc_lock);
spin_lock(&vol_list_lock);
vi = find_vol(fullpath);
if (!IS_ERR(vi))
free_vol(vi);
mutex_unlock(&dfs_cache.dc_lock);
spin_unlock(&vol_list_lock);
kref_put(&vi->refcnt, vol_release);
}
/* Get all tcons that are within a DFS namespace and can be refreshed */
......@@ -1280,7 +1284,7 @@ static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
spin_unlock(&cifs_tcp_ses_lock);
}
static inline bool is_dfs_link(const char *path)
static bool is_dfs_link(const char *path)
{
char *s;
......@@ -1290,7 +1294,7 @@ static inline bool is_dfs_link(const char *path)
return !!strchr(s + 1, '\\');
}
static inline char *get_dfs_root(const char *path)
static char *get_dfs_root(const char *path)
{
char *s, *npath;
......@@ -1309,31 +1313,67 @@ static inline char *get_dfs_root(const char *path)
return npath;
}
static inline void put_tcp_server(struct TCP_Server_Info *server)
{
cifs_put_tcp_session(server, 0);
}
static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
{
struct TCP_Server_Info *server;
server = cifs_find_tcp_session(vol);
if (IS_ERR_OR_NULL(server))
return NULL;
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsGood) {
spin_unlock(&GlobalMid_Lock);
put_tcp_server(server);
return NULL;
}
spin_unlock(&GlobalMid_Lock);
return server;
}
/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
struct cifs_tcon *tcon, const char *path)
static struct cifs_ses *find_root_ses(struct vol_info *vi,
struct cifs_tcon *tcon,
const char *path)
{
char *rpath;
int rc;
struct cache_entry *ce;
struct dfs_info3_param ref = {0};
char *mdata = NULL, *devname = NULL;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct smb_vol vol;
struct smb_vol vol = {NULL};
rpath = get_dfs_root(path);
if (IS_ERR(rpath))
return ERR_CAST(rpath);
memset(&vol, 0, sizeof(vol));
down_read(&htable_rw_lock);
ce = lookup_cache_entry(rpath, NULL);
if (IS_ERR(ce)) {
up_read(&htable_rw_lock);
ses = ERR_CAST(ce);
goto out;
}
rc = dfs_cache_noreq_find(rpath, &ref, NULL);
rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
if (rc) {
up_read(&htable_rw_lock);
ses = ERR_PTR(rc);
goto out;
}
mdata = cifs_compose_mount_options(vi->vi_mntdata, rpath, &ref,
up_read(&htable_rw_lock);
mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
&devname);
free_dfs_info_param(&ref);
......@@ -1351,13 +1391,8 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
goto out;
}
server = cifs_find_tcp_session(&vol);
if (IS_ERR_OR_NULL(server)) {
ses = ERR_PTR(-EHOSTDOWN);
goto out;
}
if (server->tcpStatus != CifsGood) {
cifs_put_tcp_session(server, 0);
server = get_tcp_server(&vol);
if (!server) {
ses = ERR_PTR(-EHOSTDOWN);
goto out;
}
......@@ -1373,17 +1408,15 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
}
/* Refresh DFS cache entry from a given tcon */
static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
struct cifs_tcon *tcon)
static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
int rc = 0;
unsigned int xid;
char *path, *npath;
unsigned int h;
struct dfs_cache_entry *ce;
struct cache_entry *ce;
struct cifs_ses *root_ses = NULL, *ses;
struct dfs_info3_param *refs = NULL;
int numrefs = 0;
struct cifs_ses *root_ses = NULL, *ses;
xid = get_xid();
......@@ -1391,19 +1424,23 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
rc = get_normalized_path(path, &npath);
if (rc)
goto out;
goto out_free_xid;
mutex_lock(&dfs_cache_list_lock);
ce = find_cache_entry(npath, &h);
mutex_unlock(&dfs_cache_list_lock);
down_read(&htable_rw_lock);
ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out;
up_read(&htable_rw_lock);
goto out_free_path;
}
if (!cache_entry_expired(ce))
goto out;
if (!cache_entry_expired(ce)) {
up_read(&htable_rw_lock);
goto out_free_path;
}
up_read(&htable_rw_lock);
/* If it's a DFS Link, then use root SMB session for refreshing it */
if (is_dfs_link(npath)) {
......@@ -1411,35 +1448,29 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
if (IS_ERR(ses)) {
rc = PTR_ERR(ses);
root_ses = NULL;
goto out;
goto out_free_path;
}
} else {
ses = tcon->ses;
}
if (unlikely(!ses->server->ops->get_dfs_refer)) {
rc = -EOPNOTSUPP;
} else {
rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs,
&numrefs, dc->dc_nlsc,
tcon->remap);
if (!rc) {
mutex_lock(&dfs_cache_list_lock);
ce = __update_cache_entry(npath, refs, numrefs);
mutex_unlock(&dfs_cache_list_lock);
dump_refs(refs, numrefs);
free_dfs_info_array(refs, numrefs);
if (IS_ERR(ce))
rc = PTR_ERR(ce);
}
rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
&numrefs);
if (!rc) {
dump_refs(refs, numrefs);
rc = update_cache_entry(npath, refs, numrefs);
free_dfs_info_array(refs, numrefs);
}
out:
if (root_ses)
cifs_put_smb_ses(root_ses);
free_xid(xid);
out_free_path:
free_normalized_path(path, npath);
out_free_xid:
free_xid(xid);
return rc;
}
/*
......@@ -1448,30 +1479,61 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
*/
static void refresh_cache_worker(struct work_struct *work)
{
struct dfs_cache *dc = container_of(work, struct dfs_cache,
dc_refresh.work);
struct dfs_cache_vol_info *vi;
struct vol_info *vi, *nvi;
struct TCP_Server_Info *server;
LIST_HEAD(list);
LIST_HEAD(vols);
LIST_HEAD(tcons);
struct cifs_tcon *tcon, *ntcon;
int rc;
mutex_lock(&dc->dc_lock);
list_for_each_entry(vi, &dc->dc_vol_list, vi_list) {
server = cifs_find_tcp_session(&vi->vi_vol);
if (IS_ERR_OR_NULL(server))
/*
* Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
* for refreshing.
*/
spin_lock(&vol_list_lock);
list_for_each_entry(vi, &vol_list, list) {
server = get_tcp_server(&vi->smb_vol);
if (!server)
continue;
if (server->tcpStatus != CifsGood)
goto next;
get_tcons(server, &list);
list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
do_refresh_tcon(dc, vi, tcon);
kref_get(&vi->refcnt);
list_add_tail(&vi->rlist, &vols);
put_tcp_server(server);
}
spin_unlock(&vol_list_lock);
/* Walk through all TCONs and refresh any expired cache entry */
list_for_each_entry_safe(vi, nvi, &vols, rlist) {
spin_lock(&vi->smb_vol_lock);
server = get_tcp_server(&vi->smb_vol);
spin_unlock(&vi->smb_vol_lock);
if (!server)
goto next_vol;
get_tcons(server, &tcons);
rc = 0;
list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
/*
* Skip tcp server if any of its tcons failed to refresh
* (possibly due to reconnects).
*/
if (!rc)
rc = refresh_tcon(vi, tcon);
list_del_init(&tcon->ulist);
cifs_put_tcon(tcon);
}
next:
cifs_put_tcp_session(server, 0);
put_tcp_server(server);
next_vol:
list_del_init(&vi->rlist);
kref_put(&vi->refcnt, vol_release);
}
queue_delayed_work(cifsiod_wq, &dc->dc_refresh, dc->dc_ttl * HZ);
mutex_unlock(&dc->dc_lock);
spin_lock(&cache_ttl_lock);
queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
spin_unlock(&cache_ttl_lock);
}
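
The worker is now split into two passes because most of the per-volume work can sleep: the first pass walks vol_list under the spinlock only long enough to check that the volume's server is in CifsGood state and to kref_get() it onto a private rlist; the second pass then does the blocking work (reconnecting to the server, refreshing each tcon over the network) with no spinlock held, dropping each reference when done.
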
......@@ -2921,7 +2921,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
"direct_writev couldn't get user pages "
"(rc=%zd) iter type %d iov_offset %zd "
"count %zd\n",
result, from->type,
result, iov_iter_type(from),
from->iov_offset, from->count);
dump_stack();
......@@ -3132,7 +3132,7 @@ static ssize_t __cifs_writev(
* In this case, fall back to non-direct write function.
* this could be improved by getting pages directly in ITER_KVEC
*/
if (direct && from->type & ITER_KVEC) {
if (direct && iov_iter_is_kvec(from)) {
cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
direct = false;
}
......@@ -3652,7 +3652,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
"couldn't get user pages (rc=%zd)"
" iter type %d"
" iov_offset %zd count %zd\n",
result, direct_iov.type,
result, iov_iter_type(&direct_iov),
direct_iov.iov_offset,
direct_iov.count);
dump_stack();
......@@ -3863,7 +3863,7 @@ static ssize_t __cifs_readv(
* fall back to data copy read path
* this could be improved by getting pages directly in ITER_KVEC
*/
if (direct && to->type & ITER_KVEC) {
if (direct && iov_iter_is_kvec(to)) {
cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
direct = false;
}
......
......@@ -2228,7 +2228,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
return -ENOTSUPP;
}
static int cifs_truncate_page(struct address_space *mapping, loff_t from)
int cifs_truncate_page(struct address_space *mapping, loff_t from)
{
pgoff_t index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE - 1);
......@@ -2245,7 +2245,7 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
return rc;
}
static void cifs_setsize(struct inode *inode, loff_t offset)
void cifs_setsize(struct inode *inode, loff_t offset)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
......
......@@ -743,7 +743,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
{
struct close_cancelled_open *cancelled;
cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
if (!cancelled)
return -ENOMEM;
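
The switch to GFP_ATOMIC is the shortlog fix for __smb2_handle_cancelled_cmd(): the function can apparently be reached from the mid release path in a context that must not sleep, where a GFP_KERNEL allocation would be a bug.
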
......
......@@ -12,6 +12,7 @@
#include <linux/uuid.h>
#include <linux/sort.h>
#include <crypto/aead.h>
#include "cifsfs.h"
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
......@@ -804,7 +805,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
sizeof(struct smb2_file_all_info),
&rsp_iov[1], sizeof(struct smb2_file_all_info),
(char *)&tcon->crfid.file_all_info))
tcon->crfid.file_all_info_is_valid = 1;
tcon->crfid.file_all_info_is_valid = true;
oshr_exit:
mutex_unlock(&tcon->crfid.fid_mutex);
......@@ -1523,7 +1524,9 @@ smb2_ioctl_query_info(const unsigned int xid,
COMPOUND_FID, COMPOUND_FID,
qi.info_type, true, buffer,
qi.output_buffer_length,
CIFSMaxBufSize);
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
}
} else if (qi.flags == PASSTHRU_SET_INFO) {
/* Can eventually relax perm check since server enforces too */
......@@ -2053,14 +2056,33 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_search_info *srch_inf)
{
__le16 *utf16_path;
int rc;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct smb_rqst rqst[2];
struct kvec rsp_iov[2];
int resp_buftype[2];
struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
int rc, flags = 0;
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct smb2_query_directory_rsp *qd_rsp = NULL;
struct smb2_create_rsp *op_rsp = NULL;
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
return -ENOMEM;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
/* Open */
memset(&open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
oparms.disposition = FILE_OPEN;
......@@ -2071,22 +2093,75 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = fid;
oparms.reconnect = false;
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
kfree(utf16_path);
if (rc) {
cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
return rc;
}
rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto qdf_free;
smb2_set_next_command(tcon, &rqst[0]);
/* Query directory */
srch_inf->entries_in_buffer = 0;
srch_inf->index_of_last_entry = 2;
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
if (rc) {
cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
memset(&qd_iov, 0, sizeof(qd_iov));
rqst[1].rq_iov = qd_iov;
rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
COMPOUND_FID, COMPOUND_FID,
0, srch_inf->info_level);
if (rc)
goto qdf_free;
smb2_set_related(&rqst[1]);
rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
resp_buftype, rsp_iov);
/* If the open failed there is nothing to do */
op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
goto qdf_free;
}
fid->persistent_fid = op_rsp->PersistentFileId;
fid->volatile_fid = op_rsp->VolatileFileId;
/* Anything other than ENODATA means a genuine error */
if (rc && rc != -ENODATA) {
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
trace_smb3_query_dir_err(xid, fid->persistent_fid,
tcon->tid, tcon->ses->Suid, 0, 0, rc);
goto qdf_free;
}
qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
trace_smb3_query_dir_done(xid, fid->persistent_fid,
tcon->tid, tcon->ses->Suid, 0, 0);
srch_inf->endOfSearch = true;
rc = 0;
goto qdf_free;
}
rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
srch_inf);
if (rc) {
trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
tcon->ses->Suid, 0, 0, rc);
goto qdf_free;
}
resp_buftype[1] = CIFS_NO_BUFFER;
trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
qdf_free:
kfree(utf16_path);
SMB2_open_free(&rqst[0]);
SMB2_query_directory_free(&rqst[1]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
return rc;
}
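Condensed, the rewrite above batches the directory open and the first query into a single compound round trip: the query-directory request is built against COMPOUND_FID and marked related, so the server substitutes the file id produced by the preceding open. The skeleton of the flow, with setup and error handling elided (names exactly as in the hunk above):

        rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
        smb2_set_next_command(tcon, &rqst[0]);

        rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
                                       COMPOUND_FID, COMPOUND_FID,
                                       0, srch_inf->info_level);
        smb2_set_related(&rqst[1]);

        /* one round trip on the wire instead of two */
        rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
                                resp_buftype, rsp_iov);

        rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
                                        srch_inf);

        SMB2_open_free(&rqst[0]);
        SMB2_query_directory_free(&rqst[1]);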
......@@ -2697,7 +2772,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
true /* is_fctl */, NULL, 0,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
if (rc)
goto querty_exit;
......@@ -3094,29 +3172,33 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
return rc;
}
/*
* Extending the file
*/
if ((keep_size == false) && i_size_read(inode) < off + len) {
if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
smb2_set_sparse(xid, tcon, cfile, inode, false);
eof = cpu_to_le64(off + len);
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid, &eof);
if (rc == 0) {
cifsi->server_eof = off + len;
cifs_setsize(inode, off + len);
cifs_truncate_page(inode->i_mapping, inode->i_size);
truncate_setsize(inode, off + len);
}
goto out;
}
/*
* Files are non-sparse by default so falloc may be a no-op
* Must check if file sparse. If not sparse, and not extending
* then no need to do anything since file already allocated
Must check if the file is sparse. If not sparse, and we are not
extending, then there is no need to do anything since the file is already allocated
*/
if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
if (keep_size == true)
rc = 0;
/* check if extending file */
else if (i_size_read(inode) >= off + len)
/* not extending file and already not sparse */
rc = 0;
/* BB: in future add else clause to extend file */
else
rc = -EOPNOTSUPP;
if (rc)
trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, off, len, rc);
else
trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, off, len);
free_xid(xid);
return rc;
rc = 0;
goto out;
}
if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
......@@ -3130,25 +3212,14 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
*/
if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
rc = -EOPNOTSUPP;
trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, off, len, rc);
free_xid(xid);
return rc;
}
smb2_set_sparse(xid, tcon, cfile, inode, false);
rc = 0;
} else {
smb2_set_sparse(xid, tcon, cfile, inode, false);
rc = 0;
if (i_size_read(inode) < off + len) {
eof = cpu_to_le64(off + len);
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid,
&eof);
goto out;
}
}
smb2_set_sparse(xid, tcon, cfile, inode, false);
rc = 0;
out:
if (rc)
trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
tcon->ses->Suid, off, len, rc);
......
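The restructuring above is what makes mode-0 fallocate work on non-sparse files: instead of returning -EOPNOTSUPP whenever the range extends the file, the client now sets the server-side EOF and updates the local inode size (cifs_setsize() and cifs_truncate_page() are made non-static in the earlier inode.c hunks for exactly this). From userspace the effect is a plain fallocate(2) call succeeding; a minimal test sketch (the mount-point path is hypothetical):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/cifs/testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* mode 0: allocate and extend i_size past EOF; such calls on
         * non-sparse cifs files used to fail with EOPNOTSUPP. */
        if (fallocate(fd, 0, 0, 1024 * 1024))
                perror("fallocate");
        close(fd);
        return 0;
}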
......@@ -2199,13 +2199,14 @@ create_sd_buf(umode_t mode, unsigned int *len)
struct cifs_ace *pace;
unsigned int sdlen, acelen;
*len = roundup(sizeof(struct crt_sd_ctxt) + sizeof(struct cifs_ace), 8);
*len = roundup(sizeof(struct crt_sd_ctxt) + sizeof(struct cifs_ace) * 2,
8);
buf = kzalloc(*len, GFP_KERNEL);
if (buf == NULL)
return buf;
sdlen = sizeof(struct smb3_sd) + sizeof(struct smb3_acl) +
sizeof(struct cifs_ace);
2 * sizeof(struct cifs_ace);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct crt_sd_ctxt, sd));
......@@ -2232,8 +2233,12 @@ create_sd_buf(umode_t mode, unsigned int *len)
/* create one ACE to hold the mode embedded in reserved special SID */
pace = (struct cifs_ace *)(sizeof(struct crt_sd_ctxt) + (char *)buf);
acelen = setup_special_mode_ACE(pace, (__u64)mode);
/* and one more ACE to allow access for authenticated users */
pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) +
(char *)buf));
acelen += setup_authusers_ACE(pace);
buf->acl.AclSize = cpu_to_le16(sizeof(struct cifs_acl) + acelen);
buf->acl.AceCount = cpu_to_le16(1);
buf->acl.AceCount = cpu_to_le16(2);
return buf;
}
......@@ -4296,56 +4301,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
/*
* Readdir/FindFirst
*/
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
int SMB2_query_directory_init(const unsigned int xid,
struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
int index, int info_level)
{
struct smb_rqst rqst;
struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
struct kvec rsp_iov;
int rc = 0;
int len;
int resp_buftype = CIFS_NO_BUFFER;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
__le16 asteriks = cpu_to_le16('*');
char *end_of_smb;
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
int flags = 0;
unsigned int output_size = CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE;
unsigned int total_len;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
struct kvec *iov = rqst->rq_iov;
int len, rc;
rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
switch (srch_inf->info_level) {
switch (info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
info_level);
return -EINVAL;
}
req->FileIndex = cpu_to_le32(index);
......@@ -4374,40 +4361,50 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, output_size);
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
return 0;
}
if (rc) {
if (rc == -ENODATA &&
rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
trace_smb3_query_dir_done(xid, persistent_fid,
tcon->tid, tcon->ses->Suid, index, 0);
srch_inf->endOfSearch = true;
rc = 0;
} else {
trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, 0, rc);
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
}
goto qdir_exit;
void SMB2_query_directory_free(struct smb_rqst *rqst)
{
if (rqst && rqst->rq_iov) {
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
}
int
smb2_parse_query_directory(struct cifs_tcon *tcon,
struct kvec *rsp_iov,
int resp_buftype,
struct cifs_search_info *srch_inf)
{
struct smb2_query_directory_rsp *rsp;
size_t info_buf_size;
char *end_of_smb;
int rc;
rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
return -EINVAL;
}
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
info_buf_size);
if (rc) {
trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, 0, rc);
goto qdir_exit;
}
if (rc)
return rc;
srch_inf->unicode = true;
......@@ -4420,7 +4417,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry =
(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
end_of_smb = rsp_iov.iov_len + (char *)rsp;
end_of_smb = rsp_iov->iov_len + (char *)rsp;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
......@@ -4435,11 +4432,72 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
else
cifs_tcon_dbg(VFS, "illegal search buffer type\n");
return 0;
}
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
struct smb_rqst rqst;
struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
struct smb2_query_directory_rsp *rsp = NULL;
int resp_buftype = CIFS_NO_BUFFER;
struct kvec rsp_iov;
int rc = 0;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
if (!ses || !(ses->server))
return -EIO;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
rc = SMB2_query_directory_init(xid, tcon, &rqst, persistent_fid,
volatile_fid, index,
srch_inf->info_level);
if (rc)
goto qdir_exit;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
if (rc) {
if (rc == -ENODATA &&
rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
trace_smb3_query_dir_done(xid, persistent_fid,
tcon->tid, tcon->ses->Suid, index, 0);
srch_inf->endOfSearch = true;
rc = 0;
} else {
trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, 0, rc);
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
}
goto qdir_exit;
}
rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
srch_inf);
if (rc) {
trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, 0, rc);
goto qdir_exit;
}
resp_buftype = CIFS_NO_BUFFER;
trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, srch_inf->entries_in_buffer);
return rc;
qdir_exit:
SMB2_query_directory_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
......
......@@ -1282,6 +1282,8 @@ struct smb2_echo_rsp {
#define SMB2_INDEX_SPECIFIED 0x04
#define SMB2_REOPEN 0x10
#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
struct smb2_query_directory_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
......
......@@ -197,6 +197,11 @@ extern int SMB2_echo(struct TCP_Server_Info *server);
extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf);
extern int SMB2_query_directory_init(unsigned int xid, struct cifs_tcon *tcon,
struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
int index, int info_level);
extern void SMB2_query_directory_free(struct smb_rqst *rqst);
extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid,
__le64 *eof);
......
......@@ -685,6 +685,8 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
* The default is for the mid to be synchronous, so the
* default callback just wakes up the current task.
*/
get_task_struct(current);
temp->creator = current;
temp->callback = cifs_wake_up_task;
temp->callback_data = current;
......
......@@ -76,6 +76,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
* The default is for the mid to be synchronous, so the
* default callback just wakes up the current task.
*/
get_task_struct(current);
temp->creator = current;
temp->callback = cifs_wake_up_task;
temp->callback_data = current;
......@@ -158,6 +160,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
}
}
#endif
put_task_struct(midEntry->creator);
mempool_free(midEntry, cifs_mid_poolp);
}
......
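Both hunks above address the same lifetime problem: a mid entry stores a bare pointer to the issuing task (callback_data/creator) for its wake-up callback, so the task_struct must stay pinned until the mid entry is released. The pattern in isolation (mid_like is a hypothetical stand-in for the mid entry):

#include <linux/sched/task.h>

struct mid_like {                       /* hypothetical stand-in */
        struct task_struct *creator;
};

static void mid_like_init(struct mid_like *m)
{
        /* Take a reference so a late or racing callback cannot touch a
         * freed task_struct if the submitting process exits first. */
        get_task_struct(current);
        m->creator = current;
}

static void mid_like_release(struct mid_like *m)
{
        put_task_struct(m->creator);    /* balances the get above */
}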
......@@ -32,7 +32,8 @@
#include "cifs_unicode.h"
#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl" /* DACL only */
#define CIFS_XATTR_CIFS_NTSD "system.cifs_ntsd" /* owner plus DACL */
#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
/*
......@@ -40,12 +41,62 @@
* confusing users and using the 20+ year old term 'cifs' when it is no longer
* secure, replaced by SMB2 (then even more highly secure SMB3) many years ago
*/
#define SMB3_XATTR_CIFS_ACL "system.smb3_acl"
#define SMB3_XATTR_CIFS_ACL "system.smb3_acl" /* DACL only */
#define SMB3_XATTR_CIFS_NTSD "system.smb3_ntsd" /* owner plus DACL */
#define SMB3_XATTR_ATTRIB "smb3.dosattrib" /* full name: user.smb3.dosattrib */
#define SMB3_XATTR_CREATETIME "smb3.creationtime" /* user.smb3.creationtime */
/* BB need to add server (Samba e.g) support for security and trusted prefix */
enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT,
XATTR_CIFS_NTSD };
static int cifs_attrib_set(unsigned int xid, struct cifs_tcon *pTcon,
struct inode *inode, char *full_path,
const void *value, size_t size)
{
ssize_t rc = -EOPNOTSUPP;
__u32 *pattrib = (__u32 *)value;
__u32 attrib;
FILE_BASIC_INFO info_buf;
if ((value == NULL) || (size != sizeof(__u32)))
return -ERANGE;
memset(&info_buf, 0, sizeof(info_buf));
attrib = *pattrib;
info_buf.Attributes = cpu_to_le32(attrib);
if (pTcon->ses->server->ops->set_file_info)
rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
&info_buf, xid);
if (rc == 0)
CIFS_I(inode)->cifsAttrs = attrib;
return rc;
}
static int cifs_creation_time_set(unsigned int xid, struct cifs_tcon *pTcon,
struct inode *inode, char *full_path,
const void *value, size_t size)
{
ssize_t rc = -EOPNOTSUPP;
__u64 *pcreation_time = (__u64 *)value;
__u64 creation_time;
FILE_BASIC_INFO info_buf;
if ((value == NULL) || (size != sizeof(__u64)))
return -ERANGE;
memset(&info_buf, 0, sizeof(info_buf));
creation_time = *pcreation_time;
info_buf.CreationTime = cpu_to_le64(creation_time);
if (pTcon->ses->server->ops->set_file_info)
rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
&info_buf, xid);
if (rc == 0)
CIFS_I(inode)->createtime = creation_time;
return rc;
}
static int cifs_xattr_set(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
......@@ -86,6 +137,23 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
switch (handler->flags) {
case XATTR_USER:
cifs_dbg(FYI, "%s:setting user xattr %s\n", __func__, name);
if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
(strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
rc = cifs_attrib_set(xid, pTcon, inode, full_path,
value, size);
if (rc == 0) /* force revalidate of the inode */
CIFS_I(inode)->time = 0;
break;
} else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
(strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
rc = cifs_creation_time_set(xid, pTcon, inode,
full_path, value, size);
if (rc == 0) /* force revalidate of the inode */
CIFS_I(inode)->time = 0;
break;
}
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto out;
......@@ -95,7 +163,8 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
cifs_sb->local_nls, cifs_sb);
break;
case XATTR_CIFS_ACL: {
case XATTR_CIFS_ACL:
case XATTR_CIFS_NTSD: {
struct cifs_ntsd *pacl;
if (!value)
......@@ -106,12 +175,25 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
} else {
memcpy(pacl, value, size);
if (value &&
pTcon->ses->server->ops->set_acl)
rc = pTcon->ses->server->ops->set_acl(pacl,
size, inode,
full_path, CIFS_ACL_DACL);
else
pTcon->ses->server->ops->set_acl) {
rc = 0;
if (handler->flags == XATTR_CIFS_NTSD) {
/* set owner and DACL */
rc = pTcon->ses->server->ops->set_acl(
pacl, size, inode,
full_path,
CIFS_ACL_OWNER);
}
if (rc == 0) {
/* set DACL */
rc = pTcon->ses->server->ops->set_acl(
pacl, size, inode,
full_path,
CIFS_ACL_DACL);
}
} else {
rc = -EOPNOTSUPP;
}
if (rc == 0) /* force revalidate of the inode */
CIFS_I(inode)->time = 0;
kfree(pacl);
......@@ -179,7 +261,7 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
void *value, size_t size)
{
ssize_t rc;
__u64 * pcreatetime;
__u64 *pcreatetime;
rc = cifs_revalidate_dentry_attr(dentry);
if (rc)
......@@ -244,7 +326,9 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
full_path, name, value, size, cifs_sb);
break;
case XATTR_CIFS_ACL: {
case XATTR_CIFS_ACL:
case XATTR_CIFS_NTSD: {
/* the whole ntsd is fetched regardless */
u32 acllen;
struct cifs_ntsd *pacl;
......@@ -382,6 +466,26 @@ static const struct xattr_handler smb3_acl_xattr_handler = {
.set = cifs_xattr_set,
};
static const struct xattr_handler cifs_cifs_ntsd_xattr_handler = {
.name = CIFS_XATTR_CIFS_NTSD,
.flags = XATTR_CIFS_NTSD,
.get = cifs_xattr_get,
.set = cifs_xattr_set,
};
/*
* Although this is just an alias for the above, need to move away from
* confusing users and using the 20 year old term 'cifs' when it is no
* longer secure and was replaced by SMB2/SMB3 a long time ago, and
* SMB3 and later are highly secure.
*/
static const struct xattr_handler smb3_ntsd_xattr_handler = {
.name = SMB3_XATTR_CIFS_NTSD,
.flags = XATTR_CIFS_NTSD,
.get = cifs_xattr_get,
.set = cifs_xattr_set,
};
static const struct xattr_handler cifs_posix_acl_access_xattr_handler = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
.flags = XATTR_ACL_ACCESS,
......@@ -401,6 +505,8 @@ const struct xattr_handler *cifs_xattr_handlers[] = {
&cifs_os2_xattr_handler,
&cifs_cifs_acl_xattr_handler,
&smb3_acl_xattr_handler, /* alias for above since avoiding "cifs" */
&cifs_cifs_ntsd_xattr_handler,
&smb3_ntsd_xattr_handler, /* alias for above since avoiding "cifs" */
&cifs_posix_acl_access_xattr_handler,
&cifs_posix_acl_default_xattr_handler,
NULL
......
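With the handlers above registered, backing up and restoring DOS attributes and creation time is an ordinary xattr round trip from userspace: the new set paths insist on an exact __u32 or __u64 payload (anything else returns -ERANGE) and force an inode revalidation on success. A hedged usage sketch, assuming the pre-existing read handlers and a hypothetical mount path (the smb3.* aliases behave identically):

#include <stdint.h>
#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
        const char *path = "/mnt/cifs/file";    /* hypothetical mount */
        uint32_t attrib;
        uint64_t crtime;

        /* backup: read DOS attributes and creation time */
        if (getxattr(path, "user.cifs.dosattrib", &attrib,
                     sizeof(attrib)) < 0)
                perror("getxattr dosattrib");
        if (getxattr(path, "user.cifs.creationtime", &crtime,
                     sizeof(crtime)) < 0)
                perror("getxattr creationtime");

        /* restore: the size must match exactly or the set handlers
         * above return -ERANGE */
        if (setxattr(path, "user.cifs.dosattrib", &attrib,
                     sizeof(attrib), 0))
                perror("setxattr dosattrib");
        if (setxattr(path, "user.cifs.creationtime", &crtime,
                     sizeof(crtime), 0))
                perror("setxattr creationtime");
        return 0;
}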