Commit 28ac4ad0 authored by Al Viro, committed by Greg Kroah-Hartman

lustre: simplify the living hell out of ksocknal_lib_recv_kiov()

... by using ITER_BVEC recvmsg
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent fd25dc34
...@@ -86,8 +86,10 @@ struct ksock_sched { /* per scheduler state */ ...@@ -86,8 +86,10 @@ struct ksock_sched { /* per scheduler state */
int kss_nconns; /* # connections assigned to int kss_nconns; /* # connections assigned to
* this scheduler */ * this scheduler */
struct ksock_sched_info *kss_info; /* owner of it */ struct ksock_sched_info *kss_info; /* owner of it */
struct page *kss_rx_scratch_pgs[LNET_MAX_IOV]; union {
struct kvec kss_scratch_iov[LNET_MAX_IOV]; struct bio_vec kss_scratch_bvec[LNET_MAX_IOV];
struct kvec kss_scratch_iov[LNET_MAX_IOV];
};
}; };
struct ksock_sched_info { struct ksock_sched_info {
......
...@@ -259,67 +259,11 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn) ...@@ -259,67 +259,11 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
return rc; return rc;
} }
/*
 * Tear down the scratch vmap()ed receive window set up by
 * ksocknal_lib_kiov_vmap().  A NULL address means the zero-copy
 * path was never mapped, so there is nothing to undo.
 */
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
	if (addr)
		vunmap(addr);
}
/*
 * Try to build a single virtually-contiguous mapping over the receive
 * fragments so the whole payload can be received with one iovec.
 *
 * Returns the vmap()ed base address on success (caller must undo it
 * with ksocknal_lib_kiov_vunmap()), or NULL to tell the caller to fall
 * back to per-page kmap() of each fragment.  On success *iov is filled
 * with the mapped base (plus the first fragment's offset) and total
 * length.
 */
static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
struct kvec *iov, struct page **pages)
{
void *addr;
int nob;
int i;
/* Zero-copy receive disabled by tunable, or no scratch page array. */
if (!*ksocknal_tunables.ksnd_zc_recv || !pages)
return NULL;
LASSERT(niov <= LNET_MAX_IOV);
/* Not worth a vmap() below the configured fragment-count threshold. */
if (niov < 2 ||
niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
return NULL;
/*
 * The fragments must tile contiguously across whole pages: only the
 * first may start at a non-zero offset, and every fragment but the
 * last must run exactly to the end of its page.  Otherwise a single
 * linear mapping cannot represent them.
 */
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset && i > 0) ||
(kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
nob += kiov[i].kiov_len;
}
addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
if (!addr)
return NULL;
/* Single iovec covering the whole mapped payload. */
iov->iov_base = addr + kiov[0].kiov_offset;
iov->iov_len = nob;
return addr;
}
/*
 * NOTE(review): the span below is a side-by-side commit diff that has
 * been rendered into a single column -- each line interleaves the
 * pre-patch and post-patch text of ksocknal_lib_recv_kiov().  It is
 * not compilable source; recover either side from the original commit
 * (28ac4ad0) before editing.  What the patch visibly does: it drops
 * the kmap()/vmap() scratch-kvec receive path (including the
 * SOCKNAL_SINGLE_FRAG_RX / CONFIG_HIGHMEM special cases and the
 * kernel_recvmsg() call) and instead fills a scratch bio_vec array
 * from the rx kiov fragments, builds the iterator with
 * iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, ...), and receives
 * via sock_recvmsg(..., MSG_DONTWAIT).  The per-fragment kmap() loop
 * that feeds the checksum (ksm_csum) path is retained in both
 * versions.
 */
int int
ksocknal_lib_recv_kiov(struct ksock_conn *conn) ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{ {
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK struct bio_vec *bv = conn->ksnc_scheduler->kss_scratch_bvec;
struct kvec scratch;
struct kvec *scratchiov = &scratch;
struct page **pages = NULL;
unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
unsigned int niov = conn->ksnc_rx_nkiov; unsigned int niov = conn->ksnc_rx_nkiov;
#endif
lnet_kiov_t *kiov = conn->ksnc_rx_kiov; lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
struct msghdr msg = { struct msghdr msg = {
.msg_flags = 0 .msg_flags = 0
...@@ -328,44 +272,26 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn) ...@@ -328,44 +272,26 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn)
int i; int i;
int rc; int rc;
void *base; void *base;
void *addr;
int sum; int sum;
int fragnob; int fragnob;
int n; int n;
/* for (nob = i = 0; i < niov; i++) {
* NB we can't trust socket ops to either consume our iovs nob += bv[i].bv_len = kiov[i].kiov_len;
* or leave them alone. bv[i].bv_page = kiov[i].kiov_page;
*/ bv[i].bv_offset = kiov[i].kiov_offset;
addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
if (addr) {
nob = scratchiov[0].iov_len;
n = 1;
} else {
for (nob = i = 0; i < niov; i++) {
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
}
n = niov;
} }
n = niov;
LASSERT(nob <= conn->ksnc_rx_nob_wanted); LASSERT(nob <= conn->ksnc_rx_nob_wanted);
rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov, iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, bv, n, nob);
n, nob, MSG_DONTWAIT); rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
if (conn->ksnc_msg.ksm_csum) { if (conn->ksnc_msg.ksm_csum) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov); LASSERT(i < niov);
/*
* Dang! have to kmap again because I have nowhere to
* stash the mapped address. But by doing it while the
* page is still mapped, the kernel just bumps the map
* count and returns me the address it stashed.
*/
base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
fragnob = kiov[i].kiov_len; fragnob = kiov[i].kiov_len;
if (fragnob > sum) if (fragnob > sum)
...@@ -377,14 +303,6 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn) ...@@ -377,14 +303,6 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn)
kunmap(kiov[i].kiov_page); kunmap(kiov[i].kiov_page);
} }
} }
if (addr) {
ksocknal_lib_kiov_vunmap(addr);
} else {
for (i = 0; i < niov; i++)
kunmap(kiov[i].kiov_page);
}
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment