Commit 93a09e74 authored by Potnuri Bharat Teja's avatar Potnuri Bharat Teja Committed by David S. Miller

cxgb4: add adapter hotplug support for ULDs

Upon adapter hotplug, cxgb4 registers ULD devices for all the ULDs that
are already loaded, ensuring that ULDs can enumerate the hotplugged
adapter without reloading the ULD module.
Signed-off-by: default avatarPotnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 060b6381
......@@ -60,6 +60,7 @@
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
extern struct list_head uld_list;
extern struct mutex uld_mutex;
/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
......@@ -822,6 +823,13 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */
};
/* struct to maintain ULD list to reallocate ULD resources on hotplug */
struct cxgb4_uld_list {
	struct cxgb4_uld_info uld_info;	/* copy of the info passed to cxgb4_register_uld() */
	struct list_head list_node;	/* entry in the global uld_list */
	enum cxgb4_uld uld_type;	/* which ULD this entry describes */
};
enum sge_eosw_state {
CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
......
......@@ -180,6 +180,7 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
LIST_HEAD(uld_list);
static int cfg_queues(struct adapter *adap);
......@@ -6519,11 +6520,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
if (is_uld(adapter)) {
mutex_lock(&uld_mutex);
list_add_tail(&adapter->list_node, &adapter_list);
mutex_unlock(&uld_mutex);
}
if (is_uld(adapter))
cxgb4_uld_enable(adapter);
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
......
......@@ -681,6 +681,74 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
}
#endif
/* cxgb4_uld_alloc_resources - set up one ULD's queues/IRQs on one adapter
 * @adap: the adapter
 * @type: the ULD type being attached
 * @p: the ULD methods/parameters
 *
 * Allocates the ULD rx/tx queues (and MSI-X vectors when in use) on the
 * adapter and attaches the ULD.  On any failure, everything allocated so
 * far is unwound via the goto chain and a warning is logged.
 * NOTE(review): callers appear to hold uld_mutex — confirm at call sites.
 */
static void cxgb4_uld_alloc_resources(struct adapter *adap,
				      enum cxgb4_uld type,
				      const struct cxgb4_uld_info *p)
{
	int ret = 0;

	/* Crypto needs a PCI-ULD-capable adapter; all other ULD types
	 * need offload support.  Silently skip unsuitable adapters.
	 */
	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
		return;
	/* iSCSI-T is not available on T4 chips. */
	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
		return;
	ret = cfg_queues_uld(adap, type, p);
	if (ret)
		goto out;
	ret = setup_sge_queues_uld(adap, type, p->lro);
	if (ret)
		goto free_queues;
	if (adap->flags & CXGB4_USING_MSIX) {
		ret = request_msix_queue_irqs_uld(adap, type);
		if (ret)
			goto free_rxq;
	}
	/* Only start rx if the adapter is already fully initialized. */
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		enable_rx_uld(adap, type);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	/* send mbox to enable ktls related settings. */
	if (type == CXGB4_ULD_CRYPTO &&
	    (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
		cxgb4_set_ktls_feature(adap, 1);
#endif
	/* ULD already attached on this adapter: unwind what we just
	 * allocated (the warning below will fire for this case too).
	 */
	if (adap->uld[type].add)
		goto free_irq;
	ret = setup_sge_txq_uld(adap, type, p);
	if (ret)
		goto free_irq;
	adap->uld[type] = *p;
	ret = uld_attach(adap, type);
	if (ret)
		goto free_txq;
	return;
	/* Error unwind: labels fall through in reverse allocation order. */
free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & CXGB4_USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	dev_warn(adap->pdev_dev,
		 "ULD registration failed for uld type %d\n", type);
}
void cxgb4_uld_enable(struct adapter *adap)
{
struct cxgb4_uld_list *uld_entry;
mutex_lock(&uld_mutex);
list_add_tail(&adap->list_node, &adapter_list);
list_for_each_entry(uld_entry, &uld_list, list_node)
cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
&uld_entry->uld_info);
mutex_unlock(&uld_mutex);
}
/* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
......@@ -691,63 +759,23 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
void cxgb4_register_uld(enum cxgb4_uld type,
const struct cxgb4_uld_info *p)
{
struct cxgb4_uld_list *uld_entry;
struct adapter *adap;
int ret = 0;
if (type >= CXGB4_ULD_MAX)
return;
uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
if (!uld_entry)
return;
memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
(type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
continue;
if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue;
ret = cfg_queues_uld(adap, type, p);
if (ret)
goto out;
ret = setup_sge_queues_uld(adap, type, p->lro);
if (ret)
goto free_queues;
if (adap->flags & CXGB4_USING_MSIX) {
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
}
if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* send mbox to enable ktls related settings. */
if (type == CXGB4_ULD_CRYPTO &&
(adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
cxgb4_set_ktls_feature(adap, 1);
#endif
if (adap->uld[type].add)
goto free_irq;
ret = setup_sge_txq_uld(adap, type, p);
if (ret)
goto free_irq;
adap->uld[type] = *p;
ret = uld_attach(adap, type);
if (ret)
goto free_txq;
continue;
free_txq:
release_sge_txq_uld(adap, type);
free_irq:
if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
free_sge_queues_uld(adap, type);
free_queues:
free_queues_uld(adap, type);
out:
dev_warn(adap->pdev_dev,
"ULD registration failed for uld type %d\n", type);
}
list_for_each_entry(adap, &adapter_list, list_node)
cxgb4_uld_alloc_resources(adap, type, p);
uld_entry->uld_type = type;
list_add_tail(&uld_entry->list_node, &uld_list);
mutex_unlock(&uld_mutex);
return;
}
......@@ -761,6 +789,7 @@ EXPORT_SYMBOL(cxgb4_register_uld);
*/
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
struct cxgb4_uld_list *uld_entry, *tmp;
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
......@@ -783,6 +812,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
cxgb4_set_ktls_feature(adap, 0);
#endif
}
list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
if (uld_entry->uld_type == type) {
list_del(&uld_entry->list_node);
kfree(uld_entry);
}
}
mutex_unlock(&uld_mutex);
return 0;
......
......@@ -327,6 +327,7 @@ enum cxgb4_control {
CXGB4_CONTROL_DB_DROP,
};
struct adapter;
struct pci_dev;
struct l2t_data;
struct net_device;
......@@ -465,6 +466,7 @@ struct cxgb4_uld_info {
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment