Commit 633a8e89 authored by Linus Torvalds

Merge tag '5.17-rc3-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French:
 "SMB3 client fixes including:

   - multiple fscache related fixes, reenabling the ability to read/write
     to cached files for cifs.ko (this was temporarily disabled a few
     weeks ago due to the recent fscache changes)

   - also includes a new fscache helper function ("query_occupancy")
     used by the above

   - fix for multiuser mounts and NTLMSSP auth (workstation name) for
     stable

   - fix lock ordering problem in multichannel code

   - trivial malformed comment fix"

* tag '5.17-rc3-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: fix workstation_name for multiuser mounts
  Invalidate fscache cookie only when inode attributes are changed.
  cifs: Fix the readahead conversion to manage the batch when reading from cache
  cifs: Implement cache I/O by accessing the cache directly
  netfs, cachefiles: Add a method to query presence of data in the cache
  cifs: Transition from ->readpages() to ->readahead()
  cifs: unlock chan_lock before calling cifs_put_tcp_session
  Fix a warning about a malformed kernel doc comment in cifs
parents dcb85f85 d3b331fb
@@ -462,6 +462,10 @@ operation table looks like the following::
			     struct iov_iter *iter,
			     netfs_io_terminated_t term_func,
			     void *term_func_priv);
		int (*query_occupancy)(struct netfs_cache_resources *cres,
				       loff_t start, size_t len, size_t granularity,
				       loff_t *_data_start, size_t *_data_len);
	};
With a termination handler function pointer::
@@ -536,6 +540,18 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.
* ``query_occupancy()``

  [Required] Called to find out where the next piece of data is within a
  particular region of the cache. The start and length of the region to be
  queried are passed in, along with the granularity to which the answer needs
  to be aligned. The function passes back the start and length of the data,
  if any, available within that region. Note that there may be a hole at the
  front.

  It returns 0 if some data was found, -ENODATA if there was no usable data
  within the region or -ENOBUFS if there is no caching on this file.

Note that these methods are passed a pointer to the cache resource structure,
not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the cache.
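As an illustration of the new op (this is not part of the patch), here is a minimal sketch of how a network filesystem might walk the cached extents within a byte range; netfs_dump_cached_extents() is an invented name, and the sketch assumes a read operation has already been begun on the cache resources (e.g. with fscache_begin_read_operation()) and is ended by the caller:

/* Hypothetical helper: walk the cached extents within [start, start + len)
 * using query_occupancy().
 */
static void netfs_dump_cached_extents(struct netfs_cache_resources *cres,
                                      loff_t start, size_t len)
{
        loff_t end = start + len, data_start;
        size_t data_len;

        while (start < end) {
                if (cres->ops->query_occupancy(cres, start, end - start,
                                               PAGE_SIZE,
                                               &data_start, &data_len) < 0)
                        break;  /* -ENODATA / -ENOBUFS: nothing (more) cached */

                pr_info("cached extent: %llx +%zx\n",
                        (unsigned long long)data_start, data_len);

                /* The extent may start beyond @start if there was a hole. */
                start = data_start + data_len;
        }
}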
@@ -191,6 +191,64 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
return ret;
}
/*
* Query the occupancy of the cache in a region, returning where the next chunk
* of data starts and how long it is.
*/
static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
                                      loff_t start, size_t len, size_t granularity,
                                      loff_t *_data_start, size_t *_data_len)
{
        struct cachefiles_object *object;
        struct file *file;
        loff_t off, off2;

        *_data_start = -1;
        *_data_len = 0;

        if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
                return -ENOBUFS;

        object = cachefiles_cres_object(cres);
        file = cachefiles_cres_file(cres);
        granularity = max_t(size_t, object->volume->cache->bsize, granularity);

        _enter("%pD,%li,%llx,%zx/%llx",
               file, file_inode(file)->i_ino, start, len,
               i_size_read(file_inode(file)));

        off = cachefiles_inject_read_error();
        if (off == 0)
                off = vfs_llseek(file, start, SEEK_DATA);
        if (off == -ENXIO)
                return -ENODATA; /* Beyond EOF */
        if (off < 0 && off >= (loff_t)-MAX_ERRNO)
                return -ENOBUFS; /* Error. */
        if (round_up(off, granularity) >= start + len)
                return -ENODATA; /* No data in range */

        off2 = cachefiles_inject_read_error();
        if (off2 == 0)
                off2 = vfs_llseek(file, off, SEEK_HOLE);
        if (off2 == -ENXIO)
                return -ENODATA; /* Beyond EOF */
        if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
                return -ENOBUFS; /* Error. */

        /* Round away partial blocks */
        off = round_up(off, granularity);
        off2 = round_down(off2, granularity);
        if (off2 <= off)
                return -ENODATA;

        *_data_start = off;
        if (off2 > start + len)
                *_data_len = len;
        else
                *_data_len = off2 - off;
        return 0;
}
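For reference, cachefiles_query_occupancy() above relies on SEEK_DATA/SEEK_HOLE probing of the sparse backing file. Here is a stand-alone userspace sketch of the same probing pattern (the file path is only an example):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        /* Path is only an example of a sparse cache file. */
        int fd = open("/var/cache/fscache/example", O_RDONLY);
        off_t data, hole;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        data = lseek(fd, 0, SEEK_DATA);         /* first byte backed by data */
        if (data == (off_t)-1) {                /* ENXIO: no data at/after 0 */
                perror("SEEK_DATA");
                close(fd);
                return 1;
        }
        hole = lseek(fd, data, SEEK_HOLE);      /* first hole after that data */

        printf("data extent: %lld..%lld\n", (long long)data, (long long)hole);
        close(fd);
        return 0;
}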
/*
* Handle completion of a write to the cache.
*/
@@ -545,6 +603,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
        .write                  = cachefiles_write,
        .prepare_read           = cachefiles_prepare_read,
        .prepare_write          = cachefiles_prepare_write,
        .query_occupancy        = cachefiles_query_occupancy,
};
/*
@@ -162,7 +162,7 @@ static void cifs_resolve_server(struct work_struct *work)
mutex_unlock(&server->srv_mutex);
}
/**
/*
* Mark all sessions and tcons for reconnect.
*
* @server needs to be previously set to CifsNeedReconnect.
@@ -1831,13 +1831,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
int i;
for (i = 1; i < chan_count; i++) {
/*
* note: for now, we're okay accessing ses->chans
* without chan_lock. But when chans can go away, we'll
* need to introduce ref counting to make sure that chan
* is not freed from under us.
*/
spin_unlock(&ses->chan_lock);
cifs_put_tcp_session(ses->chans[i].server, 0);
spin_lock(&ses->chan_lock);
ses->chans[i].server = NULL;
}
}
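For context on the chan_lock change above: cifs_put_tcp_session() takes further locks of its own, so calling it with chan_lock held risks a lock-ordering inversion against paths that take those locks first. A self-contained userspace analogue of the problem and of the drop/re-take pattern the patch uses (all names below are invented):

#include <pthread.h>
#include <stdio.h>

/* lock_a plays the role of the per-session channel lock, lock_b the lock that
 * the "put" helper needs internally.
 */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;
static int server_refcount = 1;

static void put_tcp_session(void)       /* needs lock_b, like the real put */
{
        pthread_mutex_lock(&lock_b);
        server_refcount--;
        pthread_mutex_unlock(&lock_b);
}

/* Buggy shape: lock_a is held across a call that takes lock_b, so any path
 * that takes lock_b and then lock_a can deadlock against this one.
 */
static void put_channels_buggy(void)
{
        pthread_mutex_lock(&lock_a);
        put_tcp_session();
        pthread_mutex_unlock(&lock_a);
}

/* Shape of the fix: drop lock_a around the call, then re-take it before
 * touching the protected per-channel state again.
 */
static void put_channels_fixed(void)
{
        pthread_mutex_lock(&lock_a);
        pthread_mutex_unlock(&lock_a);
        put_tcp_session();
        pthread_mutex_lock(&lock_a);
        /* clear the per-channel server pointer here, under lock_a */
        pthread_mutex_unlock(&lock_a);
}

int main(void)
{
        put_channels_buggy();   /* single-threaded here, so no real deadlock */
        put_channels_fixed();
        printf("refcount now %d\n", server_refcount);
        return 0;
}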
@@ -1981,6 +1977,19 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
}
        ctx->workstation_name = kstrdup(ses->workstation_name, GFP_KERNEL);
        if (!ctx->workstation_name) {
                cifs_dbg(FYI, "Unable to allocate memory for workstation_name\n");
                rc = -ENOMEM;
                kfree(ctx->username);
                ctx->username = NULL;
                kfree_sensitive(ctx->password);
                ctx->password = NULL;
                kfree(ctx->domainname);
                ctx->domainname = NULL;
                goto out_key_put;
        }
out_key_put:
up_read(&key->sem);
key_put(key);
@@ -134,37 +134,127 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
}
}
static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
        const struct netfs_cache_ops *ops = fscache_operation_valid(cres);

        if (ops)
                ops->end_operation(cres);
}
/*
* Retrieve a page from FS-Cache
* Fallback page reading interface.
*/
int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
__func__, CIFS_I(inode)->fscache, page, inode);
return -ENOBUFS; // Needs conversion to using netfslib
struct netfs_cache_resources cres;
struct fscache_cookie *cookie = cifs_inode_cookie(inode);
struct iov_iter iter;
struct bio_vec bvec[1];
int ret;
memset(&cres, 0, sizeof(cres));
bvec[0].bv_page = page;
bvec[0].bv_offset = 0;
bvec[0].bv_len = PAGE_SIZE;
iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
ret = fscache_begin_read_operation(&cres, cookie);
if (ret < 0)
return ret;
ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
NULL, NULL);
fscache_end_operation(&cres);
return ret;
}
/*
* Retrieve a set of pages from FS-Cache
* Fallback page writing interface.
*/
int __cifs_readpages_from_fscache(struct inode *inode,
struct address_space *mapping,
struct list_head *pages,
unsigned *nr_pages)
static int fscache_fallback_write_page(struct inode *inode, struct page *page,
bool no_space_allocated_yet)
{
cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n",
__func__, CIFS_I(inode)->fscache, *nr_pages, inode);
return -ENOBUFS; // Needs conversion to using netfslib
struct netfs_cache_resources cres;
struct fscache_cookie *cookie = cifs_inode_cookie(inode);
struct iov_iter iter;
struct bio_vec bvec[1];
loff_t start = page_offset(page);
size_t len = PAGE_SIZE;
int ret;
memset(&cres, 0, sizeof(cres));
bvec[0].bv_page = page;
bvec[0].bv_offset = 0;
bvec[0].bv_len = PAGE_SIZE;
iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
ret = fscache_begin_write_operation(&cres, cookie);
if (ret < 0)
return ret;
ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
no_space_allocated_yet);
if (ret == 0)
ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
fscache_end_operation(&cres);
return ret;
}
void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
/*
* Retrieve a page from FS-Cache
*/
int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
struct cifsInodeInfo *cifsi = CIFS_I(inode);
int ret;
WARN_ON(!cifsi->fscache);
cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
__func__, cifs_inode_cookie(inode), page, inode);
ret = fscache_fallback_read_page(inode, page);
if (ret < 0)
return ret;
/* Read completed synchronously */
SetPageUptodate(page);
return 0;
}
void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
{
cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
__func__, cifsi->fscache, page, inode);
__func__, cifs_inode_cookie(inode), page, inode);
fscache_fallback_write_page(inode, page, true);
}
/*
* Query the cache occupancy.
*/
int __cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages)
{
struct netfs_cache_resources cres;
struct fscache_cookie *cookie = cifs_inode_cookie(inode);
loff_t start, data_start;
size_t len, data_len;
int ret;
// Needs conversion to using netfslib
ret = fscache_begin_read_operation(&cres, cookie);
if (ret < 0)
return ret;
start = first * PAGE_SIZE;
len = nr_pages * PAGE_SIZE;
ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
&data_start, &data_len);
if (ret == 0) {
*_data_first = data_start / PAGE_SIZE;
*_data_nr_pages = len / PAGE_SIZE;
}
fscache_end_operation(&cres);
return ret;
}
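To tie this helper to the readahead conversion mentioned in the pull summary, here is a rough, hypothetical sketch (not the actual cifs_readahead() code) of how a readahead window could be split into server-backed and cache-backed chunks using the occupancy query; read_from_server() and read_from_cache() are placeholders:

static void readahead_window(struct inode *inode, pgoff_t index, unsigned int nr)
{
        pgoff_t data_first;
        unsigned int data_nr;

        while (nr > 0) {
                if (cifs_fscache_query_occupancy(inode, index, nr,
                                                 &data_first, &data_nr) < 0 ||
                    data_first >= index + nr) {
                        read_from_server(inode, index, nr);     /* placeholder */
                        break;
                }

                if (data_first > index) {
                        /* Hole in the cache before the cached data. */
                        read_from_server(inode, index, data_first - index);
                        nr -= data_first - index;
                        index = data_first;
                }

                if (data_nr > nr)
                        data_nr = nr;
                if (data_nr == 0)
                        break;
                read_from_cache(inode, index, data_nr);         /* placeholder */
                index += data_nr;
                nr -= data_nr;
        }
}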
@@ -9,6 +9,7 @@
#ifndef _CIFS_FSCACHE_H
#define _CIFS_FSCACHE_H
#include <linux/swap.h>
#include <linux/fscache.h>
#include "cifsglob.h"
@@ -58,14 +59,6 @@ void cifs_fscache_fill_coherency(struct inode *inode,
}
extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
extern int __cifs_readpages_from_fscache(struct inode *,
struct address_space *,
struct list_head *,
unsigned *);
extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
{
return CIFS_I(inode)->fscache;
@@ -80,33 +73,52 @@ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags
i_size_read(inode), flags);
}
static inline int cifs_readpage_from_fscache(struct inode *inode,
struct page *page)
{
if (CIFS_I(inode)->fscache)
return __cifs_readpage_from_fscache(inode, page);
extern int __cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages);
return -ENOBUFS;
static inline int cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages)
{
if (!cifs_inode_cookie(inode))
return -ENOBUFS;
return __cifs_fscache_query_occupancy(inode, first, nr_pages,
_data_first, _data_nr_pages);
}
static inline int cifs_readpages_from_fscache(struct inode *inode,
struct address_space *mapping,
struct list_head *pages,
unsigned *nr_pages)
extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
static inline int cifs_readpage_from_fscache(struct inode *inode,
struct page *page)
{
if (CIFS_I(inode)->fscache)
return __cifs_readpages_from_fscache(inode, mapping, pages,
nr_pages);
if (cifs_inode_cookie(inode))
return __cifs_readpage_from_fscache(inode, page);
return -ENOBUFS;
}
static inline void cifs_readpage_to_fscache(struct inode *inode,
struct page *page)
{
if (PageFsCache(page))
if (cifs_inode_cookie(inode))
__cifs_readpage_to_fscache(inode, page);
}
static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
{
if (PageFsCache(page)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
wait_on_page_fscache(page);
fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
}
return true;
}
#else /* CONFIG_CIFS_FSCACHE */
static inline
void cifs_fscache_fill_coherency(struct inode *inode,
@@ -123,22 +135,29 @@ static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool upd
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
static inline int
cifs_readpage_from_fscache(struct inode *inode, struct page *page)
static inline int cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages)
{
*_data_first = ULONG_MAX;
*_data_nr_pages = 0;
return -ENOBUFS;
}
static inline int cifs_readpages_from_fscache(struct inode *inode,
struct address_space *mapping,
struct list_head *pages,
unsigned *nr_pages)
static inline int
cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
return -ENOBUFS;
}
static inline void cifs_readpage_to_fscache(struct inode *inode,
struct page *page) {}
static inline
void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
return true; /* May release page */
}
#endif /* CONFIG_CIFS_FSCACHE */
@@ -83,6 +83,7 @@ static void cifs_set_ops(struct inode *inode)
static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
@@ -113,6 +114,9 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n",
__func__, cifs_i->uniqueid);
set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
/* Invalidate fscache cookie */
cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd);
fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
}
/*
@@ -2261,8 +2265,6 @@ cifs_dentry_needs_reval(struct dentry *dentry)
int
cifs_invalidate_mapping(struct inode *inode)
{
struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
int rc = 0;
if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
@@ -2272,8 +2274,6 @@ cifs_invalidate_mapping(struct inode *inode)
__func__, inode);
}
cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
return rc;
}
@@ -713,7 +713,11 @@ static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
else
sz += sizeof(__le16);
sz += sizeof(__le16) * strnlen(ses->workstation_name, CIFS_MAX_WORKSTATION_LEN);
if (ses->workstation_name)
sz += sizeof(__le16) * strnlen(ses->workstation_name,
CIFS_MAX_WORKSTATION_LEN);
else
sz += sizeof(__le16);
return sz;
}
@@ -244,6 +244,13 @@ struct netfs_cache_ops {
        int (*prepare_write)(struct netfs_cache_resources *cres,
                             loff_t *_start, size_t *_len, loff_t i_size,
                             bool no_space_allocated_yet);

        /* Query the occupancy of the cache in a region, returning where the
         * next chunk of data starts and how long it is.
         */
        int (*query_occupancy)(struct netfs_cache_resources *cres,
                               loff_t start, size_t len, size_t granularity,
                               loff_t *_data_start, size_t *_data_len);
};
struct readahead_control;