Commit e2c7114a authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

nfp: add async version of mailbox communication

Some control messages must be sent from atomic context.  The mailbox
takes sleeping locks and uses a waitqueue so add a "posted" version
of communication.

Trylock the semaphore and if that's successful kick off the device
communication.  The device communication will be completed from
a workqueue, which will also release the semaphore.

If locks are taken queue the message and return.  Schedule a
different workqueue to take the semaphore and run the communication.
Note that there are currently no atomic users which would actually
need the return value, so all replies to posted messages are just
freed.
Signed-off-by: default avatarJakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: default avatarDirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent d7053e04
...@@ -100,7 +100,7 @@ struct nfp_ccm { ...@@ -100,7 +100,7 @@ struct nfp_ccm {
u16 tag_alloc_last; u16 tag_alloc_last;
struct sk_buff_head replies; struct sk_buff_head replies;
struct wait_queue_head wq; wait_queue_head_t wq;
}; };
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app); int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
...@@ -110,6 +110,10 @@ struct sk_buff * ...@@ -110,6 +110,10 @@ struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb, nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int reply_size); enum nfp_ccm_type type, unsigned int reply_size);
int nfp_ccm_mbox_alloc(struct nfp_net *nn);
void nfp_ccm_mbox_free(struct nfp_net *nn);
int nfp_ccm_mbox_init(struct nfp_net *nn);
void nfp_ccm_mbox_clean(struct nfp_net *nn);
bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size); bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
struct sk_buff * struct sk_buff *
nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size, nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
...@@ -118,4 +122,6 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb, ...@@ -118,4 +122,6 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
enum nfp_ccm_type type, enum nfp_ccm_type type,
unsigned int reply_size, unsigned int reply_size,
unsigned int max_reply_size); unsigned int max_reply_size);
int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int max_reply_size);
#endif #endif
...@@ -41,12 +41,14 @@ enum nfp_net_mbox_cmsg_state { ...@@ -41,12 +41,14 @@ enum nfp_net_mbox_cmsg_state {
* @err: error encountered during processing if any * @err: error encountered during processing if any
* @max_len: max(request_len, reply_len) * @max_len: max(request_len, reply_len)
* @exp_reply: expected reply length (0 means don't validate) * @exp_reply: expected reply length (0 means don't validate)
* @posted: the message was posted and nobody waits for the reply
*/ */
struct nfp_ccm_mbox_cmsg_cb { struct nfp_ccm_mbox_cmsg_cb {
enum nfp_net_mbox_cmsg_state state; enum nfp_net_mbox_cmsg_state state;
int err; int err;
unsigned int max_len; unsigned int max_len;
unsigned int exp_reply; unsigned int exp_reply;
bool posted;
}; };
static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn) static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
...@@ -65,6 +67,7 @@ nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len) ...@@ -65,6 +67,7 @@ nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
cb->err = 0; cb->err = 0;
cb->max_len = max_len; cb->max_len = max_len;
cb->exp_reply = exp_reply; cb->exp_reply = exp_reply;
cb->posted = false;
} }
static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb) static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
...@@ -96,6 +99,20 @@ static void nfp_ccm_mbox_set_busy(struct sk_buff *skb) ...@@ -96,6 +99,20 @@ static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY; cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
} }
/* Return true if @skb was sent fire-and-forget via nfp_ccm_mbox_post(),
 * i.e. nobody is waiting for its reply.
 */
static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
{
	return ((struct nfp_ccm_mbox_cmsg_cb *)skb->cb)->posted;
}
/* Flag @skb as posted - the reply will be discarded instead of delivered
 * to a waiter.
 */
static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
{
	((struct nfp_ccm_mbox_cmsg_cb *)skb->cb)->posted = true;
}
static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb) static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
{ {
return skb_queue_is_first(&nn->mbox_cmsg.queue, skb); return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
...@@ -119,6 +136,8 @@ static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn) ...@@ -119,6 +136,8 @@ static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
cb = (void *)skb->cb; cb = (void *)skb->cb;
cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT; cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
if (cb->posted)
queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work);
} }
static void static void
...@@ -205,9 +224,7 @@ static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last) ...@@ -205,9 +224,7 @@ static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
while (true) { while (true) {
unsigned int length, offset, type; unsigned int length, offset, type;
struct nfp_ccm_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 *skb_data;
u32 tlv_hdr; u32 tlv_hdr;
int i, cnt;
tlv_hdr = readl(data); tlv_hdr = readl(data);
type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr); type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
...@@ -278,20 +295,26 @@ static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last) ...@@ -278,20 +295,26 @@ static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
goto next_tlv; goto next_tlv;
} }
if (length <= skb->len) if (!cb->posted) {
__skb_trim(skb, length); __be32 *skb_data;
else int i, cnt;
skb_put(skb, length - skb->len);
if (length <= skb->len)
/* We overcopy here slightly, but that's okay, the skb is large __skb_trim(skb, length);
* enough, and the garbage will be ignored (beyond skb->len). else
*/ skb_put(skb, length - skb->len);
skb_data = (__be32 *)skb->data;
memcpy(skb_data, &hdr, 4); /* We overcopy here slightly, but that's okay,
* the skb is large enough, and the garbage will
cnt = DIV_ROUND_UP(length, 4); * be ignored (beyond skb->len).
for (i = 1 ; i < cnt; i++) */
skb_data[i] = cpu_to_be32(readl(data + i * 4)); skb_data = (__be32 *)skb->data;
memcpy(skb_data, &hdr, 4);
cnt = DIV_ROUND_UP(length, 4);
for (i = 1 ; i < cnt; i++)
skb_data[i] = cpu_to_be32(readl(data + i * 4));
}
cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND; cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
next_tlv: next_tlv:
...@@ -314,6 +337,14 @@ static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last) ...@@ -314,6 +337,14 @@ static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
smp_wmb(); /* order the cb->err vs. cb->state */ smp_wmb(); /* order the cb->err vs. cb->state */
} }
cb->state = NFP_NET_MBOX_CMSG_STATE_DONE; cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
if (cb->posted) {
if (cb->err)
nn_dp_warn(&nn->dp,
"mailbox posted msg failed type:%u err:%d\n",
nfp_ccm_get_type(skb), cb->err);
dev_consume_skb_any(skb);
}
} while (skb != last); } while (skb != last);
nfp_ccm_mbox_mark_next_runner(nn); nfp_ccm_mbox_mark_next_runner(nn);
...@@ -563,6 +594,89 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb, ...@@ -563,6 +594,89 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
return err; return err;
} }
/* Work item scheduled by nfp_ccm_mbox_mark_next_runner() when the next
 * message in the queue is a posted one: takes over running the queue from
 * sleeping context, since the posted sender could not wait itself.
 */
static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
{
	struct sk_buff *skb;
	struct nfp_net *nn;

	nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	/* Head of the queue is expected to be a posted message marked as the
	 * runner; anything else indicates state machine corruption.
	 */
	skb = __skb_peek(&nn->mbox_cmsg.queue);
	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
		    !nfp_ccm_mbox_should_run(nn, skb))) {
		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
		return;
	}

	/* Runs the queued messages; releases the queue lock internally
	 * (note: no spin_unlock on this path).
	 */
	nfp_ccm_mbox_run_queue_unlock(nn);
}
/* Work item that completes a device communication kicked off from atomic
 * context by nfp_ccm_mbox_post().  Runs with the ctrl BAR semaphore held
 * (taken via trylock by the poster) and releases it on exit.
 */
static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
{
	struct sk_buff *skb;
	struct nfp_net *nn;
	int err;

	nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);

	skb = skb_peek(&nn->mbox_cmsg.queue);
	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
		/* Should never happen so it's unclear what to do here.. */
		goto exit_unlock_wake;

	/* Wait for the posted reconfig to finish, then either copy replies
	 * out (posted skbs get freed there) or fail the batch with -EIO.
	 */
	err = nfp_net_mbox_reconfig_wait_posted(nn);
	if (!err)
		nfp_ccm_mbox_copy_out(nn, skb);
	else
		nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);

exit_unlock_wake:
	/* Release the BAR semaphore taken by nfp_ccm_mbox_post() and wake
	 * any sleeping senders blocked on the mailbox.
	 */
	nn_ctrl_bar_unlock(nn);
	wake_up_all(&nn->mbox_cmsg.wq);
}
/* nfp_ccm_mbox_post() - send a control message from atomic context
 * @nn:             NFP Net device
 * @skb:            request message (consumed on both success and failure)
 * @type:           CCM message type
 * @max_reply_size: maximum expected reply length
 *
 * Atomic-context-safe variant of nfp_ccm_mbox_communicate(): never sleeps.
 * The reply is not delivered to the caller - posted replies are discarded
 * once processed.  Return 0 on successful enqueue, negative errno otherwise.
 */
int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
		      enum nfp_ccm_type type, unsigned int max_reply_size)
{
	int err;

	/* exp_reply is 0 - nobody waits, so the reply is never validated */
	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
				       GFP_ATOMIC);
	if (err)
		goto err_free_skb;

	nfp_ccm_mbox_mark_posted(skb);

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
	if (err)
		goto err_unlock;

	if (nfp_ccm_mbox_is_first(nn, skb)) {
		/* Queue was empty - we must kick off the exchange ourselves.
		 * If the BAR semaphore is free, start the device communication
		 * now and let wait_work collect the result; otherwise the
		 * current lock holder's queue run (or runq_work) picks us up.
		 */
		if (nn_ctrl_bar_trylock(nn)) {
			nfp_ccm_mbox_copy_in(nn, skb);
			nfp_net_mbox_reconfig_post(nn,
						   NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
			queue_work(nn->mbox_cmsg.workq,
				   &nn->mbox_cmsg.wait_work);
		} else {
			nfp_ccm_mbox_mark_next_runner(nn);
		}
	}

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

	return 0;

err_unlock:
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
err_free_skb:
	dev_kfree_skb_any(skb);
	return err;
}
struct sk_buff * struct sk_buff *
nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size, nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
unsigned int reply_size, gfp_t flags) unsigned int reply_size, gfp_t flags)
...@@ -589,3 +703,32 @@ bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size) ...@@ -589,3 +703,32 @@ bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
{ {
return nfp_ccm_mbox_max_msg(nn) >= size; return nfp_ccm_mbox_max_msg(nn) >= size;
} }
/* Per-vNIC runtime init hook for the CCM mailbox.  Nothing to do at
 * present; exists for symmetry with nfp_ccm_mbox_clean().
 */
int nfp_ccm_mbox_init(struct nfp_net *nn)
{
	return 0;
}
/* Flush all outstanding posted-message work items before the vNIC is
 * torn down, so no work touches the device afterwards.
 */
void nfp_ccm_mbox_clean(struct nfp_net *nn)
{
	drain_workqueue(nn->mbox_cmsg.workq);
}
/* Allocate and initialize the per-vNIC CCM mailbox state: message queue,
 * wait queue, the two posted-message work items, and their workqueue.
 * Return 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */
int nfp_ccm_mbox_alloc(struct nfp_net *nn)
{
	skb_queue_head_init(&nn->mbox_cmsg.queue);
	init_waitqueue_head(&nn->mbox_cmsg.wq);
	INIT_WORK(&nn->mbox_cmsg.wait_work, nfp_ccm_mbox_post_wait_work);
	INIT_WORK(&nn->mbox_cmsg.runq_work, nfp_ccm_mbox_post_runq_work);

	/* WQ_UNBOUND: the work may sleep on the BAR semaphore */
	nn->mbox_cmsg.workq = alloc_workqueue("nfp-ccm-mbox", WQ_UNBOUND, 0);
	return nn->mbox_cmsg.workq ? 0 : -ENOMEM;
}
/* Release mailbox resources.  The workqueue is destroyed first (which
 * flushes it); by then no messages may remain queued.
 */
void nfp_ccm_mbox_free(struct nfp_net *nn)
{
	destroy_workqueue(nn->mbox_cmsg.workq);
	WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
}
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <net/xdp.h> #include <net/xdp.h>
#include "nfp_net_ctrl.h" #include "nfp_net_ctrl.h"
...@@ -586,6 +587,9 @@ struct nfp_net_dp { ...@@ -586,6 +587,9 @@ struct nfp_net_dp {
* @mbox_cmsg: Common Control Message via vNIC mailbox state * @mbox_cmsg: Common Control Message via vNIC mailbox state
* @mbox_cmsg.queue: CCM mbox queue of pending messages * @mbox_cmsg.queue: CCM mbox queue of pending messages
* @mbox_cmsg.wq: CCM mbox wait queue of waiting processes * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
* @mbox_cmsg.workq: CCM mbox work queue for @wait_work and @runq_work
* @mbox_cmsg.wait_work: CCM mbox posted msg reconfig wait work
* @mbox_cmsg.runq_work: CCM mbox posted msg queue runner work
* @mbox_cmsg.tag: CCM mbox message tag allocator * @mbox_cmsg.tag: CCM mbox message tag allocator
* @debugfs_dir: Device directory in debugfs * @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list * @vnic_list: Entry on device vNIC list
...@@ -669,6 +673,9 @@ struct nfp_net { ...@@ -669,6 +673,9 @@ struct nfp_net {
struct { struct {
struct sk_buff_head queue; struct sk_buff_head queue;
wait_queue_head_t wq; wait_queue_head_t wq;
struct workqueue_struct *workq;
struct work_struct wait_work;
struct work_struct runq_work;
u16 tag; u16 tag;
} mbox_cmsg; } mbox_cmsg;
...@@ -886,6 +893,11 @@ static inline void nn_ctrl_bar_lock(struct nfp_net *nn) ...@@ -886,6 +893,11 @@ static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
down(&nn->bar_lock); down(&nn->bar_lock);
} }
/* Try to take the control BAR semaphore without sleeping.
 * Return true when the lock was acquired.
 */
static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
{
	/* down_trylock() returns 0 on success, hence the inversion */
	return down_trylock(&nn->bar_lock) == 0;
}
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn) static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{ {
up(&nn->bar_lock); up(&nn->bar_lock);
...@@ -927,6 +939,8 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn); ...@@ -927,6 +939,8 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size); int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd); int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd); int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
unsigned int unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <net/vxlan.h> #include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h" #include "nfpcore/nfp_nsp.h"
#include "ccm.h"
#include "nfp_app.h" #include "nfp_app.h"
#include "nfp_net_ctrl.h" #include "nfp_net_ctrl.h"
#include "nfp_net.h" #include "nfp_net.h"
...@@ -229,6 +230,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn) ...@@ -229,6 +230,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
spin_lock_bh(&nn->reconfig_lock); spin_lock_bh(&nn->reconfig_lock);
WARN_ON(nn->reconfig_sync_present);
nn->reconfig_sync_present = true; nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) { if (nn->reconfig_timer_active) {
...@@ -341,6 +343,24 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd) ...@@ -341,6 +343,24 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET); return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
} }
/* nfp_net_mbox_reconfig_post() - kick off a mailbox command asynchronously
 * @nn:       NFP Net device structure
 * @mbox_cmd: mailbox command to post
 *
 * Write the command and post the reconfig without waiting for completion,
 * so it is usable from atomic context.  Pair with
 * nfp_net_mbox_reconfig_wait_posted() to collect the result.
 */
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
}
/* nfp_net_mbox_reconfig_wait_posted() - wait for a posted mailbox command
 * @nn: NFP Net device structure
 *
 * Sleeping counterpart of nfp_net_mbox_reconfig_post().  Wait for the
 * posted reconfig to complete and return the negated device return code
 * read back from the mailbox (0 on success).
 */
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nfp_net_reconfig_wait_posted(nn);

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd) int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{ {
int ret; int ret;
...@@ -3814,14 +3834,15 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, ...@@ -3814,14 +3834,15 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0); timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
skb_queue_head_init(&nn->mbox_cmsg.queue);
init_waitqueue_head(&nn->mbox_cmsg.wq);
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps); &nn->tlv_caps);
if (err) if (err)
goto err_free_nn; goto err_free_nn;
err = nfp_ccm_mbox_alloc(nn);
if (err)
goto err_free_nn;
return nn; return nn;
err_free_nn: err_free_nn:
...@@ -3839,7 +3860,7 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, ...@@ -3839,7 +3860,7 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
void nfp_net_free(struct nfp_net *nn) void nfp_net_free(struct nfp_net *nn)
{ {
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue)); nfp_ccm_mbox_free(nn);
if (nn->dp.netdev) if (nn->dp.netdev)
free_netdev(nn->dp.netdev); free_netdev(nn->dp.netdev);
...@@ -4117,9 +4138,13 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -4117,9 +4138,13 @@ int nfp_net_init(struct nfp_net *nn)
if (nn->dp.netdev) { if (nn->dp.netdev) {
nfp_net_netdev_init(nn); nfp_net_netdev_init(nn);
err = nfp_net_tls_init(nn); err = nfp_ccm_mbox_init(nn);
if (err) if (err)
return err; return err;
err = nfp_net_tls_init(nn);
if (err)
goto err_clean_mbox;
} }
nfp_net_vecs_init(nn); nfp_net_vecs_init(nn);
...@@ -4127,6 +4152,10 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -4127,6 +4152,10 @@ int nfp_net_init(struct nfp_net *nn)
if (!nn->dp.netdev) if (!nn->dp.netdev)
return 0; return 0;
return register_netdev(nn->dp.netdev); return register_netdev(nn->dp.netdev);
err_clean_mbox:
nfp_ccm_mbox_clean(nn);
return err;
} }
/** /**
...@@ -4139,5 +4168,6 @@ void nfp_net_clean(struct nfp_net *nn) ...@@ -4139,5 +4168,6 @@ void nfp_net_clean(struct nfp_net *nn)
return; return;
unregister_netdev(nn->dp.netdev); unregister_netdev(nn->dp.netdev);
nfp_ccm_mbox_clean(nn);
nfp_net_reconfig_wait_posted(nn); nfp_net_reconfig_wait_posted(nn);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment