Commit 65ffc679 authored by Al Viro, committed by Greg Kroah-Hartman

lustre: don't reinvent struct bio_vec

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 28ac4ad0
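
Note: the whole change hinges on one fact — struct bio_vec (include/linux/bvec.h) already carries exactly the page/length/offset triple that lnet_kiov_t open-coded, so lnet_kiov_t can become a plain typedef and every kiov_* field access maps one-for-one onto a bv_* field. For reference, the kernel definition of this era, annotated with the field mapping assumed throughout the diff below:

	struct bio_vec {
		struct page	*bv_page;	/* was kiov_page */
		unsigned int	bv_len;		/* was kiov_len */
		unsigned int	bv_offset;	/* was kiov_offset */
	};

The invariant documented on the removed struct (the fragment must not pass the end of the page, i.e. bv_offset + bv_len <= PAGE_SIZE) is unchanged and is still enforced by the LASSERTs in lnet_extract_kiov() further down.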
@@ -503,21 +503,7 @@ typedef struct {
 /* NB lustre portals uses struct iovec internally! */
 typedef struct iovec lnet_md_iovec_t;
 
-/**
- * A page-based fragment of a MD.
- */
-typedef struct {
-	/** Pointer to the page where the fragment resides */
-	struct page *kiov_page;
-	/** Length in bytes of the fragment */
-	unsigned int kiov_len;
-	/**
-	 * Starting offset of the fragment within the page. Note that the
-	 * end of the fragment must not pass the end of the page; i.e.,
-	 * kiov_len + kiov_offset <= PAGE_SIZE.
-	 */
-	unsigned int kiov_offset;
-} lnet_kiov_t;
+typedef struct bio_vec lnet_kiov_t;
 
 /** @} lnet_md */
 
 /** \addtogroup lnet_eq
@@ -717,8 +717,8 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
 	LASSERT(nkiov > 0);
 	LASSERT(net);
 
-	while (offset >= kiov->kiov_len) {
-		offset -= kiov->kiov_len;
+	while (offset >= kiov->bv_len) {
+		offset -= kiov->bv_len;
 		nkiov--;
 		kiov++;
 		LASSERT(nkiov > 0);
@@ -728,10 +728,10 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
 	do {
 		LASSERT(nkiov > 0);
 
-		fragnob = min((int)(kiov->kiov_len - offset), nob);
+		fragnob = min((int)(kiov->bv_len - offset), nob);
 
-		sg_set_page(sg, kiov->kiov_page, fragnob,
-			    kiov->kiov_offset + offset);
+		sg_set_page(sg, kiov->bv_page, fragnob,
+			    kiov->bv_offset + offset);
 		sg = sg_next(sg);
 		if (!sg) {
 			CERROR("lacking enough sg entries to map tx\n");
@@ -164,13 +164,13 @@ ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
 	do {
 		LASSERT(tx->tx_nkiov > 0);
 
-		if (nob < (int)kiov->kiov_len) {
-			kiov->kiov_offset += nob;
-			kiov->kiov_len -= nob;
+		if (nob < (int)kiov->bv_len) {
+			kiov->bv_offset += nob;
+			kiov->bv_len -= nob;
 			return rc;
 		}
 
-		nob -= (int)kiov->kiov_len;
+		nob -= (int)kiov->bv_len;
 		tx->tx_kiov = ++kiov;
 		tx->tx_nkiov--;
 	} while (nob);
@@ -326,13 +326,13 @@ ksocknal_recv_kiov(struct ksock_conn *conn)
 	do {
 		LASSERT(conn->ksnc_rx_nkiov > 0);
 
-		if (nob < (int)kiov->kiov_len) {
-			kiov->kiov_offset += nob;
-			kiov->kiov_len -= nob;
+		if (nob < (int)kiov->bv_len) {
+			kiov->bv_offset += nob;
+			kiov->bv_len -= nob;
 			return -EAGAIN;
 		}
 
-		nob -= kiov->kiov_len;
+		nob -= kiov->bv_len;
 		conn->ksnc_rx_kiov = ++kiov;
 		conn->ksnc_rx_nkiov--;
 	} while (nob);
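
Both hunks above implement the same bookkeeping: consume nob bytes from the front of a bio_vec array, shrinking a partially consumed fragment in place and stepping over fully consumed ones. A minimal standalone sketch of that pattern (plain C; bvec_consume is a hypothetical helper name, and the local struct bio_vec merely mirrors the kernel's so the example compiles in userspace):

	#include <assert.h>

	/* Local mirror of the kernel's struct bio_vec, for illustration only. */
	struct page;
	struct bio_vec {
		struct page *bv_page;
		unsigned int bv_len;
		unsigned int bv_offset;
	};

	/*
	 * Consume 'nob' bytes from the front of a bio_vec array, as
	 * ksocknal_send_kiov()/ksocknal_recv_kiov() do above: a partially
	 * consumed fragment has its offset advanced and its length shrunk;
	 * a fully consumed fragment is skipped by advancing the pointer.
	 * Returns the new first fragment and updates *nvec.
	 */
	static struct bio_vec *bvec_consume(struct bio_vec *vec,
					    unsigned int *nvec,
					    unsigned int nob)
	{
		while (nob) {
			assert(*nvec > 0);
			if (nob < vec->bv_len) {
				vec->bv_offset += nob;	/* partial fragment */
				vec->bv_len -= nob;
				break;
			}
			nob -= vec->bv_len;	/* whole fragment consumed */
			vec++;
			(*nvec)--;
		}
		return vec;
	}

The send path returns rc on a partial send and the receive path returns -EAGAIN, but the fragment arithmetic is identical in both.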
@@ -131,13 +131,13 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
 	if (tx->tx_msg.ksm_zc_cookies[0]) {
 		/* Zero copy is enabled */
 		struct sock *sk = sock->sk;
-		struct page *page = kiov->kiov_page;
-		int offset = kiov->kiov_offset;
-		int fragsize = kiov->kiov_len;
+		struct page *page = kiov->bv_page;
+		int offset = kiov->bv_offset;
+		int fragsize = kiov->bv_len;
 		int msgflg = MSG_DONTWAIT;
 
 		CDEBUG(D_NET, "page %p + offset %x for %d\n",
-		       page, offset, kiov->kiov_len);
+		       page, offset, kiov->bv_len);
 
 		if (!list_empty(&conn->ksnc_tx_queue) ||
 		    fragsize < tx->tx_resid)
@@ -165,9 +165,9 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
 		int i;
 
 		for (nob = i = 0; i < niov; i++) {
-			scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
-						 kiov[i].kiov_offset;
-			nob += scratchiov[i].iov_len = kiov[i].kiov_len;
+			scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
+						 kiov[i].bv_offset;
+			nob += scratchiov[i].iov_len = kiov[i].bv_len;
 		}
 
 		if (!list_empty(&conn->ksnc_tx_queue) ||
@@ -177,7 +177,7 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
 		rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
 
 		for (i = 0; i < niov; i++)
-			kunmap(kiov[i].kiov_page);
+			kunmap(kiov[i].bv_page);
 	}
 	return rc;
 }
@@ -262,7 +262,6 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
 int
 ksocknal_lib_recv_kiov(struct ksock_conn *conn)
 {
-	struct bio_vec *bv = conn->ksnc_scheduler->kss_scratch_bvec;
 	unsigned int niov = conn->ksnc_rx_nkiov;
 	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
 	struct msghdr msg = {
@@ -274,33 +273,28 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn)
 	void *base;
 	int sum;
 	int fragnob;
-	int n;
 
-	for (nob = i = 0; i < niov; i++) {
-		nob += bv[i].bv_len = kiov[i].kiov_len;
-		bv[i].bv_page = kiov[i].kiov_page;
-		bv[i].bv_offset = kiov[i].kiov_offset;
-	}
-	n = niov;
+	for (nob = i = 0; i < niov; i++)
+		nob += kiov[i].bv_len;
 
 	LASSERT(nob <= conn->ksnc_rx_nob_wanted);
 
-	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, bv, n, nob);
+	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, kiov, niov, nob);
 	rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
 
 	if (conn->ksnc_msg.ksm_csum) {
 		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
 			LASSERT(i < niov);
 
-			base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
-			fragnob = kiov[i].kiov_len;
+			base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
+			fragnob = kiov[i].bv_len;
 			if (fragnob > sum)
 				fragnob = sum;
 
 			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
 							   base, fragnob);
 
-			kunmap(kiov[i].kiov_page);
+			kunmap(kiov[i].bv_page);
 		}
 	}
 	return rc;
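
Because lnet_kiov_t is now literally a struct bio_vec, the receive path above can feed conn->ksnc_rx_kiov straight to iov_iter_bvec(): the per-scheduler kss_scratch_bvec array, the copy loop that filled it, and the n counter all become dead code and are deleted. For reference, the iov_iter_bvec() prototype this call site matches (as best recalled for this kernel generation; note that direction still carries the ITER_BVEC flag here, which later kernels dropped):

	void iov_iter_bvec(struct iov_iter *i, int direction,
			   const struct bio_vec *bvec, unsigned long nr_segs,
			   size_t count);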
@@ -324,12 +318,12 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
 	if (tx->tx_kiov) {
 		for (i = 0; i < tx->tx_nkiov; i++) {
-			base = kmap(tx->tx_kiov[i].kiov_page) +
-			       tx->tx_kiov[i].kiov_offset;
+			base = kmap(tx->tx_kiov[i].bv_page) +
+			       tx->tx_kiov[i].bv_offset;
 
-			csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
+			csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);
 
-			kunmap(tx->tx_kiov[i].kiov_page);
+			kunmap(tx->tx_kiov[i].bv_page);
 		}
 	} else {
 		for (i = 1; i < tx->tx_niov; i++)
@@ -134,11 +134,11 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 		for (i = 0; i < (int)niov; i++) {
 			/* We take the page pointer on trust */
-			if (lmd->md_iov.kiov[i].kiov_offset +
-			    lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
+			if (lmd->md_iov.kiov[i].bv_offset +
+			    lmd->md_iov.kiov[i].bv_len > PAGE_SIZE)
 				return -EINVAL; /* invalid length */
 
-			total_length += lmd->md_iov.kiov[i].kiov_len;
+			total_length += lmd->md_iov.kiov[i].bv_len;
 		}
 
 		lmd->md_length = total_length;
@@ -280,7 +280,7 @@ lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
 	LASSERT(!niov || kiov);
 	while (niov-- > 0)
-		nob += (kiov++)->kiov_len;
+		nob += (kiov++)->bv_len;
 
 	return nob;
 }
@@ -302,16 +302,16 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
 	LASSERT(!in_interrupt());
 
 	LASSERT(ndiov > 0);
-	while (doffset >= diov->kiov_len) {
-		doffset -= diov->kiov_len;
+	while (doffset >= diov->bv_len) {
+		doffset -= diov->bv_len;
 		diov++;
 		ndiov--;
 		LASSERT(ndiov > 0);
 	}
 
 	LASSERT(nsiov > 0);
-	while (soffset >= siov->kiov_len) {
-		soffset -= siov->kiov_len;
+	while (soffset >= siov->bv_len) {
+		soffset -= siov->bv_len;
 		siov++;
 		nsiov--;
 		LASSERT(nsiov > 0);
@@ -320,16 +320,16 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
 	do {
 		LASSERT(ndiov > 0);
 		LASSERT(nsiov > 0);
-		this_nob = min(diov->kiov_len - doffset,
-			       siov->kiov_len - soffset);
+		this_nob = min(diov->bv_len - doffset,
+			       siov->bv_len - soffset);
 		this_nob = min(this_nob, nob);
 
 		if (!daddr)
-			daddr = ((char *)kmap(diov->kiov_page)) +
-				diov->kiov_offset + doffset;
+			daddr = ((char *)kmap(diov->bv_page)) +
+				diov->bv_offset + doffset;
 		if (!saddr)
-			saddr = ((char *)kmap(siov->kiov_page)) +
-				siov->kiov_offset + soffset;
+			saddr = ((char *)kmap(siov->bv_page)) +
+				siov->bv_offset + soffset;
 
 		/*
 		 * Vanishing risk of kmap deadlock when mapping 2 pages.
@@ -339,22 +339,22 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
 		memcpy(daddr, saddr, this_nob);
 		nob -= this_nob;
 
-		if (diov->kiov_len > doffset + this_nob) {
+		if (diov->bv_len > doffset + this_nob) {
 			daddr += this_nob;
 			doffset += this_nob;
 		} else {
-			kunmap(diov->kiov_page);
+			kunmap(diov->bv_page);
 			daddr = NULL;
 			diov++;
 			ndiov--;
 			doffset = 0;
 		}
 
-		if (siov->kiov_len > soffset + this_nob) {
+		if (siov->bv_len > soffset + this_nob) {
 			saddr += this_nob;
 			soffset += this_nob;
 		} else {
-			kunmap(siov->kiov_page);
+			kunmap(siov->bv_page);
 			saddr = NULL;
 			siov++;
 			nsiov--;
@@ -363,9 +363,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
 	} while (nob > 0);
 
 	if (daddr)
-		kunmap(diov->kiov_page);
+		kunmap(diov->bv_page);
 	if (saddr)
-		kunmap(siov->kiov_page);
+		kunmap(siov->bv_page);
 }
 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
@@ -392,8 +392,8 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
 	}
 
 	LASSERT(nkiov > 0);
-	while (kiovoffset >= kiov->kiov_len) {
-		kiovoffset -= kiov->kiov_len;
+	while (kiovoffset >= kiov->bv_len) {
+		kiovoffset -= kiov->bv_len;
 		kiov++;
 		nkiov--;
 		LASSERT(nkiov > 0);
@@ -403,12 +403,12 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
 		LASSERT(niov > 0);
 		LASSERT(nkiov > 0);
 		this_nob = min(iov->iov_len - iovoffset,
-			       (__kernel_size_t)kiov->kiov_len - kiovoffset);
+			       (__kernel_size_t)kiov->bv_len - kiovoffset);
 		this_nob = min(this_nob, nob);
 
 		if (!addr)
-			addr = ((char *)kmap(kiov->kiov_page)) +
-			       kiov->kiov_offset + kiovoffset;
+			addr = ((char *)kmap(kiov->bv_page)) +
+			       kiov->bv_offset + kiovoffset;
 
 		memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
 		nob -= this_nob;
@@ -421,11 +421,11 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
 			iovoffset = 0;
 		}
 
-		if (kiov->kiov_len > kiovoffset + this_nob) {
+		if (kiov->bv_len > kiovoffset + this_nob) {
 			addr += this_nob;
 			kiovoffset += this_nob;
 		} else {
-			kunmap(kiov->kiov_page);
+			kunmap(kiov->bv_page);
 			addr = NULL;
 			kiov++;
 			nkiov--;
@@ -435,7 +435,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
 	} while (nob > 0);
 
 	if (addr)
-		kunmap(kiov->kiov_page);
+		kunmap(kiov->bv_page);
 }
 EXPORT_SYMBOL(lnet_copy_kiov2iov);
@@ -455,8 +455,8 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
 	LASSERT(!in_interrupt());
 
 	LASSERT(nkiov > 0);
-	while (kiovoffset >= kiov->kiov_len) {
-		kiovoffset -= kiov->kiov_len;
+	while (kiovoffset >= kiov->bv_len) {
+		kiovoffset -= kiov->bv_len;
 		kiov++;
 		nkiov--;
 		LASSERT(nkiov > 0);
@@ -473,22 +473,22 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
 	do {
 		LASSERT(nkiov > 0);
 		LASSERT(niov > 0);
-		this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
+		this_nob = min((__kernel_size_t)kiov->bv_len - kiovoffset,
 			       iov->iov_len - iovoffset);
 		this_nob = min(this_nob, nob);
 
 		if (!addr)
-			addr = ((char *)kmap(kiov->kiov_page)) +
-			       kiov->kiov_offset + kiovoffset;
+			addr = ((char *)kmap(kiov->bv_page)) +
+			       kiov->bv_offset + kiovoffset;
 
 		memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
 		nob -= this_nob;
 
-		if (kiov->kiov_len > kiovoffset + this_nob) {
+		if (kiov->bv_len > kiovoffset + this_nob) {
 			addr += this_nob;
 			kiovoffset += this_nob;
 		} else {
-			kunmap(kiov->kiov_page);
+			kunmap(kiov->bv_page);
 			addr = NULL;
 			kiov++;
 			nkiov--;
@@ -505,7 +505,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
 	} while (nob > 0);
 
 	if (addr)
-		kunmap(kiov->kiov_page);
+		kunmap(kiov->bv_page);
 }
 EXPORT_SYMBOL(lnet_copy_iov2kiov);
@@ -526,8 +526,8 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
 		return 0; /* no frags */
 
 	LASSERT(src_niov > 0);
-	while (offset >= src->kiov_len) {	/* skip initial frags */
-		offset -= src->kiov_len;
+	while (offset >= src->bv_len) {		/* skip initial frags */
+		offset -= src->bv_len;
 		src_niov--;
 		src++;
 		LASSERT(src_niov > 0);
@@ -538,19 +538,19 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
 		LASSERT(src_niov > 0);
 		LASSERT((int)niov <= dst_niov);
 
-		frag_len = src->kiov_len - offset;
-		dst->kiov_page = src->kiov_page;
-		dst->kiov_offset = src->kiov_offset + offset;
+		frag_len = src->bv_len - offset;
+		dst->bv_page = src->bv_page;
+		dst->bv_offset = src->bv_offset + offset;
 
 		if (len <= frag_len) {
-			dst->kiov_len = len;
-			LASSERT(dst->kiov_offset + dst->kiov_len
-					<= PAGE_SIZE);
+			dst->bv_len = len;
+			LASSERT(dst->bv_offset + dst->bv_len
+					<= PAGE_SIZE);
 			return niov;
 		}
 
-		dst->kiov_len = frag_len;
-		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
+		dst->bv_len = frag_len;
+		LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
 
 		len -= frag_len;
 		dst++;
@@ -1307,7 +1307,7 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
 	int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
 
 	while (--npages >= 0)
-		__free_page(rb->rb_kiov[npages].kiov_page);
+		__free_page(rb->rb_kiov[npages].bv_page);
 
 	LIBCFS_FREE(rb, sz);
 }
@@ -1333,15 +1333,15 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 				  GFP_KERNEL | __GFP_ZERO, 0);
 		if (!page) {
 			while (--i >= 0)
-				__free_page(rb->rb_kiov[i].kiov_page);
+				__free_page(rb->rb_kiov[i].bv_page);
 
 			LIBCFS_FREE(rb, sz);
 			return NULL;
 		}
 
-		rb->rb_kiov[i].kiov_len = PAGE_SIZE;
-		rb->rb_kiov[i].kiov_offset = 0;
-		rb->rb_kiov[i].kiov_page = page;
+		rb->rb_kiov[i].bv_len = PAGE_SIZE;
+		rb->rb_kiov[i].bv_offset = 0;
+		rb->rb_kiov[i].bv_page = page;
 	}
 
 	return rb;
@@ -226,7 +226,7 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
 	struct page *pg;
 
 	for (i = 0; i < bk->bk_niov; i++) {
-		pg = bk->bk_iovs[i].kiov_page;
+		pg = bk->bk_iovs[i].bv_page;
 		brw_fill_page(pg, pattern, magic);
 	}
 }
@@ -238,7 +238,7 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
 	struct page *pg;
 
 	for (i = 0; i < bk->bk_niov; i++) {
-		pg = bk->bk_iovs[i].kiov_page;
+		pg = bk->bk_iovs[i].bv_page;
 		if (brw_check_page(pg, pattern, magic)) {
 			CERROR("Bulk page %p (%d/%d) is corrupted!\n",
 			       pg, i, bk->bk_niov);
@@ -152,10 +152,10 @@ lstcon_rpc_put(struct lstcon_rpc *crpc)
 	LASSERT(list_empty(&crpc->crp_link));
 
 	for (i = 0; i < bulk->bk_niov; i++) {
-		if (!bulk->bk_iovs[i].kiov_page)
+		if (!bulk->bk_iovs[i].bv_page)
 			continue;
 
-		__free_page(bulk->bk_iovs[i].kiov_page);
+		__free_page(bulk->bk_iovs[i].bv_page);
 	}
 
 	srpc_client_rpc_decref(crpc->crp_rpc);
@@ -705,7 +705,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
 	LASSERT(i < nkiov);
 
-	pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
+	pid = (lnet_process_id_packed_t *)page_address(kiov[i].bv_page);
 
 	return &pid[idx % SFW_ID_PER_PAGE];
 }
@@ -849,12 +849,11 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
 			      min_t(int, nob, PAGE_SIZE);
 		nob -= len;
 
-		bulk->bk_iovs[i].kiov_offset = 0;
-		bulk->bk_iovs[i].kiov_len = len;
-		bulk->bk_iovs[i].kiov_page =
-			alloc_page(GFP_KERNEL);
+		bulk->bk_iovs[i].bv_offset = 0;
+		bulk->bk_iovs[i].bv_len = len;
+		bulk->bk_iovs[i].bv_page = alloc_page(GFP_KERNEL);
 
-		if (!bulk->bk_iovs[i].kiov_page) {
+		if (!bulk->bk_iovs[i].bv_page) {
 			lstcon_rpc_put(*crpc);
 			return -ENOMEM;
 		}
@@ -784,7 +784,7 @@ sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
 			lnet_process_id_packed_t id;
 			int j;
 
-			dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
+			dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page);
 			LASSERT(dests);  /* my pages are within KVM always */
 			id = dests[i % SFW_ID_PER_PAGE];
 			if (msg->msg_magic != SRPC_MSG_MAGIC)
@@ -91,9 +91,9 @@ srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
 	LASSERT(nob > 0);
 	LASSERT(i >= 0 && i < bk->bk_niov);
 
-	bk->bk_iovs[i].kiov_offset = 0;
-	bk->bk_iovs[i].kiov_page = pg;
-	bk->bk_iovs[i].kiov_len = nob;
+	bk->bk_iovs[i].bv_offset = 0;
+	bk->bk_iovs[i].bv_page = pg;
+	bk->bk_iovs[i].bv_len = nob;
 	return nob;
 }
@@ -106,7 +106,7 @@ srpc_free_bulk(struct srpc_bulk *bk)
 	LASSERT(bk);
 
 	for (i = 0; i < bk->bk_niov; i++) {
-		pg = bk->bk_iovs[i].kiov_page;
+		pg = bk->bk_iovs[i].bv_page;
 		if (!pg)
 			break;
@@ -1864,8 +1864,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
 	LASSERT(page_count >= 0);
 
 	for (i = 0; i < page_count; i++)
-		dec_node_page_state(desc->bd_iov[i].kiov_page,
-				    NR_UNSTABLE_NFS);
+		dec_node_page_state(desc->bd_iov[i].bv_page, NR_UNSTABLE_NFS);
 
 	atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
 	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
@@ -1899,8 +1898,7 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
 	LASSERT(page_count >= 0);
 
 	for (i = 0; i < page_count; i++)
-		inc_node_page_state(desc->bd_iov[i].kiov_page,
-				    NR_UNSTABLE_NFS);
+		inc_node_page_state(desc->bd_iov[i].bv_page, NR_UNSTABLE_NFS);
 
 	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
 	atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
@@ -202,7 +202,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
 	if (unpin) {
 		for (i = 0; i < desc->bd_iov_count; i++)
-			put_page(desc->bd_iov[i].kiov_page);
+			put_page(desc->bd_iov[i].bv_page);
 	}
 
 	kfree(desc);
@@ -64,9 +64,9 @@ void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
 {
 	lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
 
-	kiov->kiov_page = page;
-	kiov->kiov_offset = pageoffset;
-	kiov->kiov_len = len;
+	kiov->bv_page = page;
+	kiov->bv_offset = pageoffset;
+	kiov->bv_len = len;
 
 	desc->bd_iov_count++;
 }
@@ -326,12 +326,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 	LASSERT(page_pools.epp_pools[p_idx]);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		LASSERT(desc->bd_enc_iov[i].kiov_page);
+		LASSERT(desc->bd_enc_iov[i].bv_page);
 		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
 		LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
 
 		page_pools.epp_pools[p_idx][g_idx] =
-			desc->bd_enc_iov[i].kiov_page;
+			desc->bd_enc_iov[i].bv_page;
 
 		if (++g_idx == PAGES_PER_POOL) {
 			p_idx++;
@@ -522,9 +522,9 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
-					    desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
-					    desc->bd_iov[i].kiov_len);
+		cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page,
+					    desc->bd_iov[i].bv_offset & ~PAGE_MASK,
+					    desc->bd_iov[i].bv_len);
 	}
 
 	if (hashsize > buflen) {
@@ -154,13 +154,13 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
 	unsigned int off, i;
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		if (desc->bd_iov[i].kiov_len == 0)
+		if (desc->bd_iov[i].bv_len == 0)
 			continue;
 
-		ptr = kmap(desc->bd_iov[i].kiov_page);
-		off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
+		ptr = kmap(desc->bd_iov[i].bv_page);
+		off = desc->bd_iov[i].bv_offset & ~PAGE_MASK;
 		ptr[off] ^= 0x1;
-		kunmap(desc->bd_iov[i].kiov_page);
+		kunmap(desc->bd_iov[i].bv_page);
 		return;
 	}
 }
@@ -349,11 +349,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 	/* fix the actual data size */
 	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-		if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
-			desc->bd_iov[i].kiov_len =
+		if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) {
+			desc->bd_iov[i].bv_len =
 				desc->bd_nob_transferred - nob;
 		}
-		nob += desc->bd_iov[i].kiov_len;
+		nob += desc->bd_iov[i].bv_len;
 	}
 
 	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,