Commit 865be780 authored by Linus Torvalds

Merge tag 'ntb-4.12-bugfixes' of git://github.com/jonmason/ntb

Pull NTB fixes from Jon Mason:
 "NTB bug fixes to address the modinfo in ntb_perf, a couple of bugs in
  the NTB transport QP calculations, skx doorbells, and sleeping in
  ntb_async_tx_submit"

* tag 'ntb-4.12-bugfixes' of git://github.com/jonmason/ntb:
  ntb: no sleep in ntb_async_tx_submit
  ntb: ntb_hw_intel: Skylake doorbells should be 32bits, not 64bits
  ntb_transport: fix bug calculating num_qps_mw
  ntb_transport: fix qp count bug
  NTB: ntb_test: fix bug printing ntb_perf results
  ntb: Correct modinfo usage statement for ntb_perf
parents 41f1830f 88931ec3
@@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = {
 	.link_is_up = xeon_link_is_up,
 	.db_ioread = skx_db_ioread,
 	.db_iowrite = skx_db_iowrite,
-	.db_size = sizeof(u64),
+	.db_size = sizeof(u32),
 	.ntb_ctl = SKX_NTBCNTL_OFFSET,
 	.mw_bar = {2, 4},
 };
...
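Not part of the commit: the hunk above shrinks the advertised Skylake doorbell register size from 64 to 32 bits. The stand-alone user-space sketch below only illustrates the general point that an access wider than the real register also covers whatever sits next to it; how db_size is consumed elsewhere in the driver, and the exact failure mode on the hardware, are assumptions here, while the 32-bit register width itself comes from the commit.

/*
 * Illustrative user-space sketch, not driver code: a toy register file with
 * two adjacent 32-bit registers.  A 64-bit store spans both of them, while
 * a 32-bit store touches only the first.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t regs[2];		/* regs[0] = doorbell, regs[1] = neighbour */
	uint64_t wide = 0x1;
	uint32_t narrow = 0x1;

	regs[0] = 0; regs[1] = 0xdeadbeef;
	memcpy(regs, &wide, sizeof(wide));	/* 64-bit write overwrites both */
	printf("64-bit write: db=%#x neighbour=%#x\n", regs[0], regs[1]);

	regs[0] = 0; regs[1] = 0xdeadbeef;
	memcpy(regs, &narrow, sizeof(narrow));	/* 32-bit write leaves the neighbour alone */
	printf("32-bit write: db=%#x neighbour=%#x\n", regs[0], regs[1]);
	return 0;
}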
@@ -177,14 +177,12 @@ struct ntb_transport_qp {
 	u64 rx_err_ver;
 	u64 rx_memcpy;
 	u64 rx_async;
-	u64 dma_rx_prep_err;
 	u64 tx_bytes;
 	u64 tx_pkts;
 	u64 tx_ring_full;
 	u64 tx_err_no_buf;
 	u64 tx_memcpy;
 	u64 tx_async;
-	u64 dma_tx_prep_err;
 };
 
 struct ntb_transport_mw {
@@ -254,8 +252,6 @@ enum {
 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
 #define NTB_QP_DEF_NUM_ENTRIES 100
 #define NTB_LINK_DOWN_TIMEOUT 10
-#define DMA_RETRIES 20
-#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
 
 static void ntb_transport_rxc_db(unsigned long data);
 static const struct ntb_ctx_ops ntb_transport_ops;
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "free tx - \t%u\n",
 			       ntb_transport_tx_free_entry(qp));
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA tx prep err - \t%llu\n",
-			       qp->dma_tx_prep_err);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA rx prep err - \t%llu\n",
-			       qp->dma_rx_prep_err);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "\n");
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 	if (!mw->virt_addr)
 		return -ENOMEM;
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
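Not part of the commit, but a quick way to see what the two num_qps_mw hunks fix: the stand-alone sketch below (the values qp_count = 6 and mw_count = 4 are illustrative assumptions, not taken from the driver) evaluates the old condition, the new condition, and how many queue pairs QP_TO_MW() actually maps to each memory window.

/*
 * Illustrative user-space sketch, not driver code: compare the old and new
 * num_qps_mw conditions against the round-robin mapping used by
 * QP_TO_MW(nt, qp) == ((qp) % mw_count), for qp_count = 6, mw_count = 4.
 */
#include <stdio.h>

int main(void)
{
	unsigned int qp_count = 6, mw_count = 4, mw_num, qp;

	for (mw_num = 0; mw_num < mw_count; mw_num++) {
		unsigned int old_qps, new_qps, actual = 0;

		/* old (buggy) condition */
		if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
			old_qps = qp_count / mw_count + 1;
		else
			old_qps = qp_count / mw_count;

		/* new condition */
		if (mw_num < qp_count % mw_count)
			new_qps = qp_count / mw_count + 1;
		else
			new_qps = qp_count / mw_count;

		/* queue pairs that the round-robin mapping really assigns here */
		for (qp = 0; qp < qp_count; qp++)
			if (qp % mw_count == mw_num)
				actual++;

		printf("mw %u: old=%u new=%u actual=%u\n",
		       mw_num, old_qps, new_qps, actual);
	}
	return 0;
}

With these numbers the old condition never takes the +1 branch (mw_num + 1 < 6 / 4 is false for every window), so windows 0 and 1 are sized for one queue pair even though two are mapped onto each of them; the new condition matches the actual mapping.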
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
 	qp->tx_err_no_buf = 0;
 	qp->tx_memcpy = 0;
 	qp->tx_async = 0;
-	qp->dma_tx_prep_err = 0;
-	qp->dma_rx_prep_err = 0;
 }
 
 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp->event_handler = NULL;
 	ntb_qp_link_down_reset(qp);
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	qp_count = ilog2(qp_bitmap);
 	if (max_num_clients && max_num_clients < qp_count)
 		qp_count = max_num_clients;
-	else if (mw_count < qp_count)
-		qp_count = mw_count;
+	else if (nt->mw_count < qp_count)
+		qp_count = nt->mw_count;
 
 	qp_bitmap &= BIT_ULL(qp_count) - 1;
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
-	int retries = 0;
 
 	len = entry->len;
 	device = chan->device;
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 	unmap->from_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan,
-						     unmap->addr[1],
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_rx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_rx_copy_callback;
 	txd->callback_param = entry;
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 	struct dmaengine_unmap_data *unmap;
 	dma_addr_t dest;
 	dma_cookie_t cookie;
-	int retries = 0;
 
 	device = chan->device;
 	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 	unmap->to_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan, dest,
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_tx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_tx_copy_callback;
 	txd->callback_param = entry;
...
@@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
 static unsigned int seg_order = 19; /* 512K */
 module_param(seg_order, uint, 0644);
-MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing");
+MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
 
 static unsigned int run_order = 32; /* 4G */
 module_param(run_order, uint, 0644);
-MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer");
+MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
 
 static bool use_dma; /* default to 0 */
 module_param(use_dma, bool, 0644);
...
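As a sanity check on the corrected wording (the arithmetic here is not part of the commit): seg_order = 19 gives 2^19 bytes = 512 KiB and run_order = 32 gives 2^32 bytes = 4 GiB, matching the /* 512K */ and /* 4G */ comments above, whereas a literal n^2 reading would suggest 361 and 1024.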
@@ -305,7 +305,7 @@ function perf_test()
 	echo "Running remote perf test $WITH DMA"
 	write_file "" $REMOTE_PERF/run
 	echo -n " "
-	read_file $LOCAL_PERF/run
+	read_file $REMOTE_PERF/run
 	echo " Passed"
 
 	_modprobe -r ntb_perf
...