Commit 93a09e74 authored by Potnuri Bharat Teja's avatar Potnuri Bharat Teja Committed by David S. Miller

cxgb4: add adapter hotplug support for ULDs

Upon adapter hotplug, cxgb4 registers ULD devices for all the ULDs that
are already loaded, ensuring that the ULDs can enumerate the hotplugged
adapter without being reloaded.
Signed-off-by: default avatarPotnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 060b6381
...@@ -60,6 +60,7 @@ ...@@ -60,6 +60,7 @@
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list; extern struct list_head adapter_list;
extern struct list_head uld_list;
extern struct mutex uld_mutex; extern struct mutex uld_mutex;
/* Suspend an Ethernet Tx queue with fewer available descriptors than this. /* Suspend an Ethernet Tx queue with fewer available descriptors than this.
...@@ -822,6 +823,13 @@ struct sge_uld_txq_info { ...@@ -822,6 +823,13 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */ u16 ntxq; /* # of egress uld queues */
}; };
/* struct to maintain ULD list to reallocate ULD resources on hotplug */
struct cxgb4_uld_list {
	struct cxgb4_uld_info uld_info;	/* copy of the ULD's registration info/ops */
	struct list_head list_node;	/* entry in the global uld_list */
	enum cxgb4_uld uld_type;	/* which ULD (CXGB4_ULD_*) this entry describes */
};
enum sge_eosw_state { enum sge_eosw_state {
CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */ CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */ CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
......
...@@ -180,6 +180,7 @@ static struct dentry *cxgb4_debugfs_root; ...@@ -180,6 +180,7 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list); LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex); DEFINE_MUTEX(uld_mutex);
LIST_HEAD(uld_list);
static int cfg_queues(struct adapter *adap); static int cfg_queues(struct adapter *adap);
...@@ -6519,11 +6520,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -6519,11 +6520,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1; pdev->needs_freset = 1;
if (is_uld(adapter)) { if (is_uld(adapter))
mutex_lock(&uld_mutex); cxgb4_uld_enable(adapter);
list_add_tail(&adapter->list_node, &adapter_list);
mutex_unlock(&uld_mutex);
}
if (!is_t4(adapter->params.chip)) if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter); cxgb4_ptp_init(adapter);
......
...@@ -681,29 +681,17 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable) ...@@ -681,29 +681,17 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
} }
#endif #endif
/* cxgb4_register_uld - register an upper-layer driver static void cxgb4_uld_alloc_resources(struct adapter *adap,
* @type: the ULD type enum cxgb4_uld type,
* @p: the ULD methods
*
* Registers an upper-layer driver with this driver and notifies the ULD
* about any presently available devices that support its type.
*/
void cxgb4_register_uld(enum cxgb4_uld type,
const struct cxgb4_uld_info *p) const struct cxgb4_uld_info *p)
{ {
struct adapter *adap;
int ret = 0; int ret = 0;
if (type >= CXGB4_ULD_MAX)
return;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
(type != CXGB4_ULD_CRYPTO && !is_offload(adap))) (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
continue; return;
if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue; return;
ret = cfg_queues_uld(adap, type, p); ret = cfg_queues_uld(adap, type, p);
if (ret) if (ret)
goto out; goto out;
...@@ -732,7 +720,7 @@ void cxgb4_register_uld(enum cxgb4_uld type, ...@@ -732,7 +720,7 @@ void cxgb4_register_uld(enum cxgb4_uld type,
ret = uld_attach(adap, type); ret = uld_attach(adap, type);
if (ret) if (ret)
goto free_txq; goto free_txq;
continue; return;
free_txq: free_txq:
release_sge_txq_uld(adap, type); release_sge_txq_uld(adap, type);
free_irq: free_irq:
...@@ -747,7 +735,47 @@ void cxgb4_register_uld(enum cxgb4_uld type, ...@@ -747,7 +735,47 @@ void cxgb4_register_uld(enum cxgb4_uld type,
out: out:
dev_warn(adap->pdev_dev, dev_warn(adap->pdev_dev,
"ULD registration failed for uld type %d\n", type); "ULD registration failed for uld type %d\n", type);
} }
/* cxgb4_uld_enable - make a (hot-plugged) adapter visible to loaded ULDs
 * @adap: the adapter
 *
 * Adds the adapter to the global adapter list and allocates ULD
 * resources on it for every upper-layer driver that has already
 * registered, so existing ULDs can use the adapter without a reload.
 * Both lists are protected by uld_mutex.
 */
void cxgb4_uld_enable(struct adapter *adap)
{
	struct cxgb4_uld_list *entry;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	list_for_each_entry(entry, &uld_list, list_node)
		cxgb4_uld_alloc_resources(adap, entry->uld_type,
					  &entry->uld_info);
	mutex_unlock(&uld_mutex);
}
/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.  A copy
 * of the ULD info is also kept on uld_list so that adapters hot-plugged
 * later can attach to this ULD (see cxgb4_uld_enable()).
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct cxgb4_uld_list *entry;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;

	/* Keep our own copy of the ULD info so it outlives this call. */
	memcpy(&entry->uld_info, p, sizeof(entry->uld_info));

	mutex_lock(&uld_mutex);
	/* Attach every adapter that is already present. */
	list_for_each_entry(adap, &adapter_list, list_node)
		cxgb4_uld_alloc_resources(adap, type, p);

	entry->uld_type = type;
	list_add_tail(&entry->list_node, &uld_list);
	mutex_unlock(&uld_mutex);
}
...@@ -761,6 +789,7 @@ EXPORT_SYMBOL(cxgb4_register_uld); ...@@ -761,6 +789,7 @@ EXPORT_SYMBOL(cxgb4_register_uld);
*/ */
int cxgb4_unregister_uld(enum cxgb4_uld type) int cxgb4_unregister_uld(enum cxgb4_uld type)
{ {
struct cxgb4_uld_list *uld_entry, *tmp;
struct adapter *adap; struct adapter *adap;
if (type >= CXGB4_ULD_MAX) if (type >= CXGB4_ULD_MAX)
...@@ -783,6 +812,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) ...@@ -783,6 +812,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
cxgb4_set_ktls_feature(adap, 0); cxgb4_set_ktls_feature(adap, 0);
#endif #endif
} }
list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
if (uld_entry->uld_type == type) {
list_del(&uld_entry->list_node);
kfree(uld_entry);
}
}
mutex_unlock(&uld_mutex); mutex_unlock(&uld_mutex);
return 0; return 0;
......
...@@ -327,6 +327,7 @@ enum cxgb4_control { ...@@ -327,6 +327,7 @@ enum cxgb4_control {
CXGB4_CONTROL_DB_DROP, CXGB4_CONTROL_DB_DROP,
}; };
struct adapter;
struct pci_dev; struct pci_dev;
struct l2t_data; struct l2t_data;
struct net_device; struct net_device;
...@@ -465,6 +466,7 @@ struct cxgb4_uld_info { ...@@ -465,6 +466,7 @@ struct cxgb4_uld_info {
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev); int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
}; };
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type); int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment