Commit a7b94c15 authored by Justin Tee, committed by Martin K. Petersen

scsi: lpfc: Replace blk_irq_poll intr handler with threaded IRQ

It has been determined that the threaded IRQ API achieves effectively the
same performance as blk_irq_poll.  Because blk_irq_poll is mostly scheduled
by softirqd and handled in softirq context, it is not entirely desirable from
a Fibre Channel driver's perspective.  A threaded IRQ model is a cleaner fit.
This patch replaces the blk_irq_poll logic with a threaded IRQ handler.
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20230417191558.83100-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 5fc849d8
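
The change hinges on the kernel's threaded-IRQ API: request_threaded_irq() registers a hard handler that runs in interrupt context plus a thread function that runs in a dedicated IRQ thread, and the hard handler defers the heavy lifting by returning IRQ_WAKE_THREAD. The sketch below shows only that general pattern under the IRQF_ONESHOT flag the patch uses; the demo_* names are placeholders for illustration and are not lpfc code.

#include <linux/interrupt.h>

/* Hypothetical per-vector context; stands in for the driver's EQ handle. */
struct demo_eq_ctx {
        void *hw;       /* hardware queue this vector services */
};

/* Hard handler: interrupt context, keep it short. */
static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
        struct demo_eq_ctx *ctx = dev_id;

        if (!ctx->hw)
                return IRQ_NONE;        /* spurious / not ours */

        /* Defer event-queue processing to the IRQ thread. */
        return IRQ_WAKE_THREAD;
}

/* Thread function: runs in a kernel thread and may sleep. */
static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
        struct demo_eq_ctx *ctx = dev_id;

        /* process and rearm the event queue here */
        (void)ctx;
        return IRQ_HANDLED;
}

static int demo_setup_irq(int irq, struct demo_eq_ctx *ctx)
{
        /*
         * IRQF_ONESHOT keeps the line masked until the thread function
         * completes, so the hardware does not re-interrupt mid-processing.
         */
        return request_threaded_irq(irq, demo_hardirq, demo_irq_thread,
                                    IRQF_ONESHOT, "demo-eq", ctx);
}
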
@@ -247,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
 irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id);
 int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
                      uint32_t len);
......
@@ -1279,7 +1279,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 /*
  * lpfc_idle_stat_delay_work - idle_stat tracking
  *
- * This routine tracks per-cq idle_stat and determines polling decisions.
+ * This routine tracks per-eq idle_stat and determines polling decisions.
  *
  * Return codes:
  *   None
@@ -1290,7 +1290,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
         struct lpfc_hba *phba = container_of(to_delayed_work(work),
                                              struct lpfc_hba,
                                              idle_stat_delay_work);
-        struct lpfc_queue *cq;
+        struct lpfc_queue *eq;
         struct lpfc_sli4_hdw_queue *hdwq;
         struct lpfc_idle_stat *idle_stat;
         u32 i, idle_percent;
@@ -1306,10 +1306,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
         for_each_present_cpu(i) {
                 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
-                cq = hdwq->io_cq;
-                /* Skip if we've already handled this cq's primary CPU */
-                if (cq->chann != i)
+                eq = hdwq->hba_eq;
+                /* Skip if we've already handled this eq's primary CPU */
+                if (eq->chann != i)
                         continue;
                 idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -1333,9 +1333,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
                 idle_percent = 100 - idle_percent;
                 if (idle_percent < 15)
-                        cq->poll_mode = LPFC_QUEUE_WORK;
+                        eq->poll_mode = LPFC_QUEUE_WORK;
                 else
-                        cq->poll_mode = LPFC_IRQ_POLL;
+                        eq->poll_mode = LPFC_THREADED_IRQ;
                 idle_stat->prev_idle = wall_idle;
                 idle_stat->prev_wall = wall;
@@ -4357,6 +4357,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
         struct lpfc_sli4_hdw_queue *qp;
         struct lpfc_io_buf *lpfc_cmd;
         int idx, cnt;
+        unsigned long iflags;
         qp = phba->sli4_hba.hdwq;
         cnt = 0;
@@ -4371,12 +4372,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
                         lpfc_cmd->hdwq_no = idx;
                         lpfc_cmd->hdwq = qp;
                         lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
-                        spin_lock(&qp->io_buf_list_put_lock);
+                        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
                         list_add_tail(&lpfc_cmd->list,
                                       &qp->lpfc_io_buf_list_put);
                         qp->put_io_bufs++;
                         qp->total_io_bufs++;
-                        spin_unlock(&qp->io_buf_list_put_lock);
+                        spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
+                                               iflags);
                 }
         }
         return cnt;
@@ -13117,8 +13119,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                 }
                 eqhdl->irq = rc;
-                rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
-                                 name, eqhdl);
+                rc = request_threaded_irq(eqhdl->irq,
+                                          &lpfc_sli4_hba_intr_handler,
+                                          &lpfc_sli4_hba_intr_handler_th,
+                                          IRQF_ONESHOT, name, eqhdl);
                 if (rc) {
                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                         "0486 MSI-X fast-path (%d) "
......
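
In the hunks above, the per-EQ poll mode is driven by CPU idle statistics: when the EQ's CPU is busy (less than 15% idle over the sampling window), the driver keeps LPFC_QUEUE_WORK; otherwise it switches the EQ to LPFC_THREADED_IRQ. A simplified sketch of that selection is below; the demo_* names and the way the samples are obtained are illustrative assumptions, not the driver's actual helpers.

#include <linux/types.h>
#include <linux/math64.h>

enum demo_poll_mode {
        DEMO_QUEUE_WORK,        /* defer CQ processing to a workqueue */
        DEMO_THREADED_IRQ,      /* process the CQ from the IRQ thread */
};

/* Cumulative idle/wall counters sampled on the previous pass. */
struct demo_idle_stat {
        u64 prev_idle;
        u64 prev_wall;
};

/*
 * Pick a poll mode from two successive samples, mirroring the "< 15% idle
 * stays on the workqueue path" threshold visible in the patch.  A zero
 * wall-time delta is treated as fully idle.
 */
static enum demo_poll_mode demo_pick_poll_mode(struct demo_idle_stat *st,
                                               u64 cur_idle, u64 cur_wall)
{
        u64 diff_idle = cur_idle - st->prev_idle;
        u64 diff_wall = cur_wall - st->prev_wall;
        u32 idle_percent = 100;

        if (diff_wall)
                idle_percent = div64_u64(diff_idle * 100, diff_wall);

        st->prev_idle = cur_idle;
        st->prev_wall = cur_wall;

        return idle_percent < 15 ? DEMO_QUEUE_WORK : DEMO_THREADED_IRQ;
}
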
@@ -82,7 +82,8 @@ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                    int);
 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                      struct lpfc_queue *eq,
-                                     struct lpfc_eqe *eqe);
+                                     struct lpfc_eqe *eqe,
+                                     enum lpfc_poll_mode poll_mode);
 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
 static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
@@ -629,7 +630,7 @@ lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
 static int
 lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
-                     uint8_t rearm)
+                     u8 rearm, enum lpfc_poll_mode poll_mode)
 {
         struct lpfc_eqe *eqe;
         int count = 0, consumed = 0;
@@ -639,7 +640,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
         eqe = lpfc_sli4_eq_get(eq);
         while (eqe) {
-                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
+                lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
                 __lpfc_sli4_consume_eqe(phba, eq, eqe);
                 consumed++;
@@ -7957,7 +7958,7 @@ lpfc_config_cgn_signal(struct lpfc_hba *phba)
  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine initializes the per-cq idle_stat to dynamically dictate
+ * This routine initializes the per-eq idle_stat to dynamically dictate
  * polling decisions.
  *
  * Return codes:
@@ -7967,16 +7968,16 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
 {
         int i;
         struct lpfc_sli4_hdw_queue *hdwq;
-        struct lpfc_queue *cq;
+        struct lpfc_queue *eq;
         struct lpfc_idle_stat *idle_stat;
         u64 wall;
         for_each_present_cpu(i) {
                 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
-                cq = hdwq->io_cq;
-                /* Skip if we've already handled this cq's primary CPU */
-                if (cq->chann != i)
+                eq = hdwq->hba_eq;
+                /* Skip if we've already handled this eq's primary CPU */
+                if (eq->chann != i)
                         continue;
                 idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -7985,13 +7986,14 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
                 idle_stat->prev_wall = wall;
                 if (phba->nvmet_support ||
-                    phba->cmf_active_mode != LPFC_CFG_OFF)
-                        cq->poll_mode = LPFC_QUEUE_WORK;
+                    phba->cmf_active_mode != LPFC_CFG_OFF ||
+                    phba->intr_type != MSIX)
+                        eq->poll_mode = LPFC_QUEUE_WORK;
                 else
-                        cq->poll_mode = LPFC_IRQ_POLL;
+                        eq->poll_mode = LPFC_THREADED_IRQ;
         }
-        if (!phba->nvmet_support)
+        if (!phba->nvmet_support && phba->intr_type == MSIX)
                 schedule_delayed_work(&phba->idle_stat_delay_work,
                                       msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
 }
@@ -9218,7 +9220,8 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
         if (mbox_pending)
                 /* process and rearm the EQ */
-                lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+                lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+                                     LPFC_QUEUE_WORK);
         else
                 /* Always clear and re-arm the EQ */
                 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -11254,7 +11257,8 @@ inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
          * will be handled through a sched from polling timer
          * function which is currently triggered every 1msec.
          */
-        lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+        lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
+                             LPFC_QUEUE_WORK);
 }
 /**
@@ -14835,7 +14839,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
  * @cq: Pointer to CQ to be processed
  * @handler: Routine to process each cqe
  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
- * @poll_mode: Polling mode we were called from
  *
  * This routine processes completion queue entries in a CQ. While a valid
  * queue element is found, the handler is called. During processing checks
@@ -14853,8 +14856,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 static bool
 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
-                                       struct lpfc_cqe *), unsigned long *delay,
-                       enum lpfc_poll_mode poll_mode)
+                                       struct lpfc_cqe *), unsigned long *delay)
 {
         struct lpfc_cqe *cqe;
         bool workposted = false;
@@ -14895,10 +14897,6 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
                 arm = false;
         }
-        /* Note: complete the irq_poll softirq before rearming CQ */
-        if (poll_mode == LPFC_IRQ_POLL)
-                irq_poll_complete(&cq->iop);
         /* Track the max number of CQEs processed in 1 EQ */
         if (count > cq->CQ_max_cqe)
                 cq->CQ_max_cqe = count;
@@ -14948,17 +14946,17 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
         case LPFC_MCQ:
                 workposted |= __lpfc_sli4_process_cq(phba, cq,
                                                      lpfc_sli4_sp_handle_mcqe,
-                                                     &delay, LPFC_QUEUE_WORK);
+                                                     &delay);
                 break;
         case LPFC_WCQ:
                 if (cq->subtype == LPFC_IO)
                         workposted |= __lpfc_sli4_process_cq(phba, cq,
                                                              lpfc_sli4_fp_handle_cqe,
-                                                             &delay, LPFC_QUEUE_WORK);
+                                                             &delay);
                 else
                         workposted |= __lpfc_sli4_process_cq(phba, cq,
                                                              lpfc_sli4_sp_handle_cqe,
-                                                             &delay, LPFC_QUEUE_WORK);
+                                                             &delay);
                 break;
         default:
                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15335,45 +15333,64 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 }
 /**
- * lpfc_sli4_sched_cq_work - Schedules cq work
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to CQ
- * @cqid: CQ ID
- *
- * This routine checks the poll mode of the CQ corresponding to
- * cq->chann, then either schedules a softirq or queue_work to complete
- * cq work.
+ * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
+ * @cq: Pointer to CQ to be processed
  *
- * queue_work path is taken if in NVMET mode, or if poll_mode is in
- * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
+ * This routine calls the cq processing routine with the handler for
+ * fast path CQEs.
  *
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wakeup the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and yet another interrupt, the CQ handler should be queued so
+ * that it is processed in a subsequent polling action. The value of
+ * the delay indicates when to reschedule it.
  **/
-static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
-                                    struct lpfc_queue *cq, uint16_t cqid)
+static void
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
 {
-        int ret = 0;
+        struct lpfc_hba *phba = cq->phba;
+        unsigned long delay;
+        bool workposted = false;
+        int ret;
-        switch (cq->poll_mode) {
-        case LPFC_IRQ_POLL:
-                /* CGN mgmt is mutually exclusive from softirq processing */
-                if (phba->cmf_active_mode == LPFC_CFG_OFF) {
-                        irq_poll_sched(&cq->iop);
-                        break;
-                }
-                fallthrough;
-        case LPFC_QUEUE_WORK:
-        default:
+        /* process and rearm the CQ */
+        workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
+                                             &delay);
+        if (delay) {
                 if (is_kdump_kernel())
-                        ret = queue_work(phba->wq, &cq->irqwork);
+                        ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
+                                                 delay);
                 else
-                        ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
+                        ret = queue_delayed_work_on(cq->chann, phba->wq,
+                                                    &cq->sched_irqwork, delay);
                 if (!ret)
                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                                        "0383 Cannot schedule queue work "
-                                        "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-                                        cqid, cq->queue_id,
-                                        raw_smp_processor_id());
+                                        "0367 Cannot schedule queue work "
+                                        "for cqid=%d on CPU %d\n",
+                                        cq->queue_id, cq->chann);
         }
+        /* wake up worker thread if there are works to be done */
+        if (workposted)
+                lpfc_worker_wake_up(phba);
+}
+/**
+ * lpfc_sli4_hba_process_cq - fast-path work handler when started by
+ * interrupt
+ * @work: pointer to work element
+ *
+ * translates from the work handler and calls the fast-path handler.
+ **/
+static void
+lpfc_sli4_hba_process_cq(struct work_struct *work)
+{
+        struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
+        __lpfc_sli4_hba_process_cq(cq);
 }
 /**
@@ -15381,6 +15398,7 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
  * @phba: Pointer to HBA context object.
  * @eq: Pointer to the queue structure.
  * @eqe: Pointer to fast-path event queue entry.
+ * @poll_mode: poll_mode to execute processing the cq.
  *
  * This routine process a event queue entry from the fast-path event queue.
  * It will check the MajorCode and MinorCode to determine this is for a
@@ -15391,11 +15409,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
  **/
 static void
 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
-                         struct lpfc_eqe *eqe)
+                         struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
 {
         struct lpfc_queue *cq = NULL;
         uint32_t qidx = eq->hdwq;
         uint16_t cqid, id;
+        int ret;
         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15455,70 +15474,25 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
         else
                 cq->isr_timestamp = 0;
 #endif
-        lpfc_sli4_sched_cq_work(phba, cq, cqid);
-}
-/**
- * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
- * @cq: Pointer to CQ to be processed
- * @poll_mode: Enum lpfc_poll_state to determine poll mode
- *
- * This routine calls the cq processing routine with the handler for
- * fast path CQEs.
- *
- * The CQ routine returns two values: the first is the calling status,
- * which indicates whether work was queued to the background discovery
- * thread. If true, the routine should wakeup the discovery thread;
- * the second is the delay parameter. If non-zero, rather than rearming
- * the CQ and yet another interrupt, the CQ handler should be queued so
- * that it is processed in a subsequent polling action. The value of
- * the delay indicates when to reschedule it.
- **/
-static void
-__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
-                           enum lpfc_poll_mode poll_mode)
-{
-        struct lpfc_hba *phba = cq->phba;
-        unsigned long delay;
-        bool workposted = false;
-        int ret = 0;
-        /* process and rearm the CQ */
-        workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
-                                             &delay, poll_mode);
-        if (delay) {
+        switch (poll_mode) {
+        case LPFC_THREADED_IRQ:
+                __lpfc_sli4_hba_process_cq(cq);
+                break;
+        case LPFC_QUEUE_WORK:
+        default:
                 if (is_kdump_kernel())
-                        ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
-                                                 delay);
+                        ret = queue_work(phba->wq, &cq->irqwork);
                 else
-                        ret = queue_delayed_work_on(cq->chann, phba->wq,
-                                                    &cq->sched_irqwork, delay);
+                        ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
                 if (!ret)
                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                                        "0367 Cannot schedule queue work "
-                                        "for cqid=%d on CPU %d\n",
-                                        cq->queue_id, cq->chann);
+                                        "0383 Cannot schedule queue work "
+                                        "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+                                        cqid, cq->queue_id,
+                                        raw_smp_processor_id());
+                break;
         }
-        /* wake up worker thread if there are works to be done */
-        if (workposted)
-                lpfc_worker_wake_up(phba);
-}
-/**
- * lpfc_sli4_hba_process_cq - fast-path work handler when started by
- * interrupt
- * @work: pointer to work element
- *
- * translates from the work handler and calls the fast-path handler.
- **/
-static void
-lpfc_sli4_hba_process_cq(struct work_struct *work)
-{
-        struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
-        __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
 }
 /**
@@ -15533,7 +15507,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
         struct lpfc_queue *cq = container_of(to_delayed_work(work),
                                              struct lpfc_queue, sched_irqwork);
-        __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
+        __lpfc_sli4_hba_process_cq(cq);
 }
 /**
@@ -15559,8 +15533,9 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
  * and returns for these events. This function is called without any lock
  * held. It gets the hbalock to access and update SLI data structures.
  *
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
+ * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
+ * when interrupt is scheduled to be handled from a threaded irq context, or
+ * else returns IRQ_NONE.
  **/
 irqreturn_t
 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
@@ -15569,8 +15544,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
         struct lpfc_hba_eq_hdl *hba_eq_hdl;
         struct lpfc_queue *fpeq;
         unsigned long iflag;
-        int ecount = 0;
         int hba_eqidx;
+        int ecount = 0;
         struct lpfc_eq_intr_info *eqi;
         /* Get the driver's phba structure from the dev_id */
@@ -15599,30 +15574,41 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
                 return IRQ_NONE;
         }
-        eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
-        eqi->icnt++;
-        fpeq->last_cpu = raw_smp_processor_id();
+        switch (fpeq->poll_mode) {
+        case LPFC_THREADED_IRQ:
+                /* CGN mgmt is mutually exclusive from irq processing */
+                if (phba->cmf_active_mode == LPFC_CFG_OFF)
+                        return IRQ_WAKE_THREAD;
+                fallthrough;
+        case LPFC_QUEUE_WORK:
+        default:
+                eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+                eqi->icnt++;
+                fpeq->last_cpu = raw_smp_processor_id();
-        if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
-            fpeq->q_flag & HBA_EQ_DELAY_CHK &&
-            phba->cfg_auto_imax &&
-            fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
-            phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
-                lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+                if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+                    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+                    phba->cfg_auto_imax &&
+                    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+                    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+                        lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
+                                                   LPFC_MAX_AUTO_EQ_DELAY);
-        /* process and rearm the EQ */
-        ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+                /* process and rearm the EQ */
+                ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+                                              LPFC_QUEUE_WORK);
-        if (unlikely(ecount == 0)) {
-                fpeq->EQ_no_entry++;
-                if (phba->intr_type == MSIX)
-                        /* MSI-X treated interrupt served as no EQ share INT */
-                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-                                        "0358 MSI-X interrupt with no EQE\n");
-                else
-                        /* Non MSI-X treated on interrupt as EQ share INT */
-                        return IRQ_NONE;
+                if (unlikely(ecount == 0)) {
+                        fpeq->EQ_no_entry++;
+                        if (phba->intr_type == MSIX)
+                                /* MSI-X treated interrupt served as no EQ share INT */
+                                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                                "0358 MSI-X interrupt with no EQE\n");
+                        else
+                                /* Non MSI-X treated on interrupt as EQ share INT */
+                                return IRQ_NONE;
+                }
         }
         return IRQ_HANDLED;
@@ -16179,13 +16165,69 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
         return status;
 }
-static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
+/**
+ * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
+ * threaded irq context.
+ *
+ * Returns
+ * IRQ_HANDLED - interrupt is handled
+ * IRQ_NONE - otherwise
+ **/
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
 {
-        struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
+        struct lpfc_hba *phba;
+        struct lpfc_hba_eq_hdl *hba_eq_hdl;
+        struct lpfc_queue *fpeq;
+        int ecount = 0;
+        int hba_eqidx;
+        struct lpfc_eq_intr_info *eqi;
+        /* Get the driver's phba structure from the dev_id */
+        hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+        phba = hba_eq_hdl->phba;
+        hba_eqidx = hba_eq_hdl->idx;
-        __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
+        if (unlikely(!phba))
+                return IRQ_NONE;
+        if (unlikely(!phba->sli4_hba.hdwq))
+                return IRQ_NONE;
-        return 1;
+        /* Get to the EQ struct associated with this vector */
+        fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
+        if (unlikely(!fpeq))
+                return IRQ_NONE;
+        eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
+        eqi->icnt++;
+        fpeq->last_cpu = raw_smp_processor_id();
+        if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+            fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+            phba->cfg_auto_imax &&
+            fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+            phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+                lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+        /* process and rearm the EQ */
+        ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+                                      LPFC_THREADED_IRQ);
+        if (unlikely(ecount == 0)) {
+                fpeq->EQ_no_entry++;
+                if (phba->intr_type == MSIX)
+                        /* MSI-X treated interrupt served as no EQ share INT */
+                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                        "3358 MSI-X interrupt with no EQE\n");
+                else
+                        /* Non MSI-X treated on interrupt as EQ share INT */
+                        return IRQ_NONE;
+        }
+        return IRQ_HANDLED;
 }
 /**
@@ -16329,8 +16371,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
         if (cq->queue_id > phba->sli4_hba.cq_max)
                 phba->sli4_hba.cq_max = cq->queue_id;
-        irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
 out:
         mempool_free(mbox, phba->mbox_mem_pool);
         return status;
......
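
Even with the threaded handler, the LPFC_QUEUE_WORK case in the hunks above still defers CQ processing to a workqueue bound to the CQ's preferred CPU, and a non-zero delay reschedules the handler as delayed work rather than rearming the CQ immediately. A rough sketch of that deferral pattern follows; the demo_* structure stands in for the relevant lpfc_queue fields and is not the driver's definition.

#include <linux/workqueue.h>

/* Illustrative stand-in for the parts of struct lpfc_queue used here. */
struct demo_cq {
        int chann;                              /* preferred CPU */
        struct work_struct irqwork;             /* immediate processing */
        struct delayed_work sched_irqwork;      /* rescheduled processing */
};

static void demo_process_cq(struct demo_cq *cq)
{
        /* walk CQEs, invoke completion handlers, rearm the CQ ... */
}

static void demo_irqwork_fn(struct work_struct *work)
{
        struct demo_cq *cq = container_of(work, struct demo_cq, irqwork);

        demo_process_cq(cq);
}

static void demo_dly_irqwork_fn(struct work_struct *work)
{
        struct demo_cq *cq = container_of(to_delayed_work(work),
                                          struct demo_cq, sched_irqwork);

        demo_process_cq(cq);
}

static void demo_cq_init(struct demo_cq *cq, int cpu)
{
        cq->chann = cpu;
        INIT_WORK(&cq->irqwork, demo_irqwork_fn);
        INIT_DELAYED_WORK(&cq->sched_irqwork, demo_dly_irqwork_fn);
}

/* Queue immediate CQ processing on the CQ's preferred CPU. */
static bool demo_sched_cq(struct workqueue_struct *wq, struct demo_cq *cq)
{
        return queue_work_on(cq->chann, wq, &cq->irqwork);
}

/* Re-queue CQ processing after 'delay' jiffies instead of rearming now. */
static bool demo_resched_cq(struct workqueue_struct *wq, struct demo_cq *cq,
                            unsigned long delay)
{
        return queue_delayed_work_on(cq->chann, wq, &cq->sched_irqwork, delay);
}
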
@@ -140,7 +140,7 @@ struct lpfc_rqb {
 enum lpfc_poll_mode {
         LPFC_QUEUE_WORK,
-        LPFC_IRQ_POLL
+        LPFC_THREADED_IRQ,
 };
 struct lpfc_idle_stat {
@@ -279,8 +279,6 @@ struct lpfc_queue {
         struct list_head _poll_list;
         void **q_pgs;   /* array to index entries per page */
-#define LPFC_IRQ_POLL_WEIGHT 256
-        struct irq_poll iop;
         enum lpfc_poll_mode poll_mode;
 };
......