Commit 7395a884 authored by Intiyaz Basha's avatar Intiyaz Basha Committed by David S. Miller

liquidio: avoided acquiring post_lock for data only queues

All control commands (soft commands) go through only Queue 0
(control and data queue). So only queue 0 needs post_lock;
the other queues are data-only queues and do not need post_lock.

Added a flag to indicate the queue can be used for soft commands.

If this flag is set, post_lock must be acquired before posting
a command to the queue.
If this flag is clear, post_lock is invalid for the queue.
Signed-off-by: Intiyaz Basha <intiyaz.basha@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3789caba
......@@ -82,6 +82,16 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
/** This flag indicates whether the queue can be used for soft commands.
 * If this flag is set, post_lock must be acquired before posting
 * a command to the queue.
 * If this flag is clear, post_lock is invalid for the queue.
 * All control commands (soft commands) will go through only Queue 0
 * (control and data queue). So only queue 0 needs post_lock;
 * the other queues are data-only queues and do not need post_lock.
 */
bool allow_soft_cmds;
u32 pkt_in_done;
/** A spinlock to protect access to the input ring.*/
......
......@@ -126,7 +126,12 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
if (iq_no == 0) {
iq->allow_soft_cmds = true;
spin_lock_init(&iq->post_lock);
} else {
iq->allow_soft_cmds = false;
}
spin_lock_init(&iq->iq_flush_running_lock);
......@@ -566,6 +571,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
/* Get the lock and prevent other tasks and tx interrupt handler from
* running.
*/
if (iq->allow_soft_cmds)
spin_lock_bh(&iq->post_lock);
st = __post_command2(iq, cmd);
......@@ -583,6 +589,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
if (iq->allow_soft_cmds)
spin_unlock_bh(&iq->post_lock);
/* This is only done here to expedite packets being flushed
......@@ -702,11 +709,20 @@ octeon_prepare_soft_command(struct octeon_device *oct,
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
struct octeon_instr_queue *iq;
struct octeon_instr_ih2 *ih2;
struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
u32 len;
iq = oct->instr_queue[sc->iq_no];
if (!iq->allow_soft_cmds) {
dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
sc->iq_no);
INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
return IQ_SEND_FAILED;
}
if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
if (ih3->dlengsz) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment