Commit 4fa56ad0 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Nothing very exciting here, just a few small bug fixes. No red flags
  for this release have shown up.

   - Regression from the last pull request in cxgb4 related to the ipv6
     fixes

   - KASAN crasher in rtrs

   - oops in hfi1 related to a buggy BIOS

   - Userspace could oops qedr's XRC support

   - Uninitialized memory when parsing a LS_NLA_TYPE_DGID netlink
     message"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/addr: Be strict with gid size
  RDMA/qedr: Fix kernel panic when trying to access recv_cq
  IB/hfi1: Fix probe time panic when AIP is enabled with a buggy BIOS
  RDMA/cxgb4: check for ipv6 address properly while destroying listener
  RDMA/rtrs-clt: Close rtrs client conn before destroying rtrs clt session files
parents 3fb4f979 d1c803a9
drivers/infiniband/core/addr.c
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
 
 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
 	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
-			      .len = sizeof(struct rdma_nla_ls_gid)},
+			      .len = sizeof(struct rdma_nla_ls_gid),
+			      .validation_type = NLA_VALIDATE_MIN,
+			      .min = sizeof(struct rdma_nla_ls_gid)},
 };
 
 static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
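For an NLA_BINARY policy entry, `.len` only bounds the maximum attribute size, so before this fix a response carrying fewer than sizeof(struct rdma_nla_ls_gid) bytes still passed validation and the kernel read past the short payload into uninitialized memory. A minimal sketch of the failure mode the new minimum-length validation closes; copy_gid_checked() is an illustrative helper, not code from this commit:

static int copy_gid_checked(const struct nlattr *attr, union ib_gid *gid)
{
	/* Without a minimum-length check, a short attribute would let
	 * the memcpy() below read past the payload into whatever
	 * happens to follow it in memory. */
	if (nla_len(attr) < sizeof(struct rdma_nla_ls_gid))
		return -EINVAL;
	memcpy(gid, nla_data(attr), sizeof(*gid));
	return 0;
}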
drivers/infiniband/hw/cxgb4/cm.c
@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 		c4iw_init_wr_wait(ep->com.wr_waitp);
 		err = cxgb4_remove_server(
 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
-				ep->com.dev->rdev.lldi.rxq_ids[0], true);
+				ep->com.dev->rdev.lldi.rxq_ids[0],
+				ep->com.local_addr.ss_family == AF_INET6);
 		if (err)
 			goto done;
 		err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
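The teardown path used to pass a hard-coded `true` for cxgb4_remove_server()'s IPv6 flag, asking the hardware to remove an IPv6 server even when the listener was IPv4; this is the regression from the earlier ipv6 fixes noted in the merge message. The fix derives the flag from the address family recorded at listen time, sketched here as a generic predicate (is_ipv6_listener() is an illustrative name, not cxgb4 code):

/* A listener's sockaddr_storage records the family it was bound
 * with, so teardown can branch on it instead of assuming IPv6. */
static bool is_ipv6_listener(const struct sockaddr_storage *ss)
{
	return ss->ss_family == AF_INET6;
}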
drivers/infiniband/hw/hfi1/affinity.c
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
  */
 int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 {
-	int node = pcibus_to_node(dd->pcidev->bus);
 	struct hfi1_affinity_node *entry;
 	const struct cpumask *local_mask;
 	int curr_cpu, possible, i, ret;
 	bool new_entry = false;
 
-	/*
-	 * If the BIOS does not have the NUMA node information set, select
-	 * NUMA 0 so we get consistent performance.
-	 */
-	if (node < 0) {
-		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
-		node = 0;
-	}
-	dd->node = node;
-
 	local_mask = cpumask_of_node(dd->node);
 	if (cpumask_first(local_mask) >= nr_cpu_ids)
 		local_mask = topology_core_cpumask(0);
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 	 * create an entry in the global affinity structure and initialize it.
 	 */
 	if (!entry) {
-		entry = node_affinity_allocate(node);
+		entry = node_affinity_allocate(dd->node);
 		if (!entry) {
 			dd_dev_err(dd,
 				   "Unable to allocate global affinity node\n");
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 	if (new_entry)
 		node_affinity_add_tail(entry);
 
+	dd->affinity_entry = entry;
 	mutex_unlock(&node_affinity.lock);
 
 	return 0;
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
 {
 	struct hfi1_affinity_node *entry;
 
-	if (dd->node < 0)
-		return;
-
 	mutex_lock(&node_affinity.lock);
+	if (!dd->affinity_entry)
+		goto unlock;
 	entry = node_affinity_lookup(dd->node);
 	if (!entry)
 		goto unlock;
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
 	 */
 	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
 unlock:
+	dd->affinity_entry = NULL;
 	mutex_unlock(&node_affinity.lock);
-	dd->node = NUMA_NO_NODE;
 }
 
 /*
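Net effect of the affinity.c changes: `dd->node < 0` no longer doubles as an "affinity was initialized" sentinel. Probe now guarantees a valid node for the device's lifetime (see the init.c hunk below), and cleanup instead keys off the new dd->affinity_entry pointer, published only once init has an entry and cleared under node_affinity.lock. A condensed sketch of the new cleanup guard (control flow abbreviated, not the literal driver code):

void clean_up(struct hfi1_devdata *dd)
{
	mutex_lock(&node_affinity.lock);
	/* If probe never published an entry, there is nothing to undo. */
	if (!dd->affinity_entry)
		goto unlock;
	/* ... release this device's per-node CPU mask state ... */
unlock:
	dd->affinity_entry = NULL;
	mutex_unlock(&node_affinity.lock);
}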
drivers/infiniband/hw/hfi1/hfi.h
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
 	spinlock_t irq_src_lock;
 	int vnic_num_vports;
 	struct net_device *dummy_netdev;
+	struct hfi1_affinity_node *affinity_entry;
 
 	/* Keeps track of IPoIB RSM rule users */
 	atomic_t ipoib_rsm_usr_num;
drivers/infiniband/hw/hfi1/init.c
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
 	dd->pcidev = pdev;
 	pci_set_drvdata(pdev, dd);
-	dd->node = NUMA_NO_NODE;
 
 	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
 			GFP_KERNEL);
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
 		goto bail;
 	}
 	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+	/*
+	 * If the BIOS does not have the NUMA node information set, select
+	 * NUMA 0 so we get consistent performance.
+	 */
+	dd->node = pcibus_to_node(pdev->bus);
+	if (dd->node == NUMA_NO_NODE) {
+		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+		dd->node = 0;
+	}
 
 	/*
 	 * Initialize all locks for the device. This needs to be as early as
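Moving the NUMA fixup into hfi1_alloc_devdata() is the heart of the buggy-BIOS fix: dd->node is now valid (0 at worst) from allocation onward rather than only after affinity init, so early callers such as hfi1_num_netdev_contexts() in the next hunk can use it directly. On a BIOS that omits NUMA information, pcibus_to_node() reports NUMA_NO_NODE (-1), and indexing per-node state with -1 is what oopsed at probe time when AIP was enabled. The normalization, sketched as a hypothetical helper:

static int hfi1_safe_pci_node(struct pci_dev *pdev)	/* hypothetical name */
{
	int node = pcibus_to_node(pdev->bus);

	/* NUMA_NO_NODE (-1) means the BIOS provided no node info;
	 * fall back to node 0 for consistent performance. */
	return (node == NUMA_NO_NODE) ? 0 : node;
}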
drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
 		return 0;
 	}
 
-	cpumask_and(node_cpu_mask, cpu_mask,
-		    cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
 
 	available_cpus = cpumask_weight(node_cpu_mask);
drivers/infiniband/hw/qedr/verbs.c
@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
 	 * TGT QP isn't associated with RQ/SQ
 	 */
 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
-	    (attrs->qp_type != IB_QPT_XRC_TGT)) {
+	    (attrs->qp_type != IB_QPT_XRC_TGT) &&
+	    (attrs->qp_type != IB_QPT_XRC_INI)) {
 		struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
 		struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
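XRC initiator QPs are created without a receive CQ, so attrs->recv_cq is NULL for them and get_qedr_cq(attrs->recv_cq) dereferenced a NULL pointer, a kernel panic userspace could trigger simply by creating an XRC QP. The guard now skips the CQ checks for both XRC variants; an illustrative predicate (qp_has_recv_cq() is not a name from this commit):

/* XRC TGT QPs carry no CQs at all, and XRC INI QPs carry no
 * receive CQ, so attrs->recv_cq must not be dereferenced for
 * either type. */
static bool qp_has_recv_cq(enum ib_qp_type t)
{
	return t != IB_QPT_XRC_TGT && t != IB_QPT_XRC_INI;
}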
drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
 	/* Now it is safe to iterate over all paths without locks */
 	list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
-		rtrs_clt_destroy_sess_files(sess, NULL);
 		rtrs_clt_close_conns(sess, true);
+		rtrs_clt_destroy_sess_files(sess, NULL);
 		kobject_put(&sess->kobj);
 	}
 
 	free_clt(clt);
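The swap fixes the use-after-free KASAN flagged: session sysfs files were destroyed while connections could still deliver completions that reference them. Closing the connections first quiesces the path, after which tearing down the files is safe. The fixed loop, annotated (comments added here, not in the commit):

list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
	rtrs_clt_close_conns(sess, true);	 /* 1: stop completions touching sess */
	rtrs_clt_destroy_sess_files(sess, NULL); /* 2: sysfs files can go now */
	kobject_put(&sess->kobj);		 /* 3: drop the final reference */
}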