Commit b5277d18 authored by Cédric Le Goater, committed by Michael Ellerman

powerpc/xive: Remove P9 DD1 flag XIVE_IRQ_FLAG_MASK_FW

This flag was used to support the PHB4 LSIs on P9 DD1, and we stopped
supporting this CPU when DD2 came out. See skiboot commit:

  https://github.com/open-power/skiboot/commit/0b0d15e3c170

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201210171450.1933725-10-clg@kaod.org
parent 4cc0e36d
@@ -1092,7 +1092,7 @@ enum {
 	OPAL_XIVE_IRQ_STORE_EOI		= 0x00000002,
 	OPAL_XIVE_IRQ_LSI		= 0x00000004,
 	OPAL_XIVE_IRQ_SHIFT_BUG		= 0x00000008, /* P9 DD1.0 workaround */
-	OPAL_XIVE_IRQ_MASK_VIA_FW	= 0x00000010,
+	OPAL_XIVE_IRQ_MASK_VIA_FW	= 0x00000010, /* P9 DD1.0 workaround */
 	OPAL_XIVE_IRQ_EOI_VIA_FW	= 0x00000020,
 };
@@ -61,7 +61,7 @@ struct xive_irq_data {
 #define XIVE_IRQ_FLAG_STORE_EOI		0x01
 #define XIVE_IRQ_FLAG_LSI		0x02
 /* #define XIVE_IRQ_FLAG_SHIFT_BUG	0x04 */ /* P9 DD1.0 workaround */
-#define XIVE_IRQ_FLAG_MASK_FW		0x08
+/* #define XIVE_IRQ_FLAG_MASK_FW	0x08 */ /* P9 DD1.0 workaround */
 #define XIVE_IRQ_FLAG_EOI_FW		0x10
 #define XIVE_IRQ_FLAG_H_INT_ESB		0x20
@@ -419,37 +419,16 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
 	/* Get the right irq */
 	kvmppc_xive_select_irq(state, &hw_num, &xd);
 
-	/*
-	 * If the interrupt is marked as needing masking via
-	 * firmware, we do it here. Firmware masking however
-	 * is "lossy", it won't return the old p and q bits
-	 * and won't set the interrupt to a state where it will
-	 * record queued ones. If this is an issue we should do
-	 * lazy masking instead.
-	 *
-	 * For now, we work around this in unmask by forcing
-	 * an interrupt whenever we unmask a non-LSI via FW
-	 * (if ever).
-	 */
-	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
-		xive_native_configure_irq(hw_num,
-				kvmppc_xive_vp(xive, state->act_server),
-				MASKED, state->number);
-		/* set old_p so we can track if an H_EOI was done */
-		state->old_p = true;
-		state->old_q = false;
-	} else {
-		/* Set PQ to 10, return old P and old Q and remember them */
-		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
-		state->old_p = !!(val & 2);
-		state->old_q = !!(val & 1);
-
-		/*
-		 * Synchronize hardware to sensure the queues are updated
-		 * when masking
-		 */
-		xive_native_sync_source(hw_num);
-	}
+	/* Set PQ to 10, return old P and old Q and remember them */
+	val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
+	state->old_p = !!(val & 2);
+	state->old_q = !!(val & 1);
+
+	/*
+	 * Synchronize hardware to sensure the queues are updated when
+	 * masking
+	 */
+	xive_native_sync_source(hw_num);
 
 	return old_prio;
 }
@@ -483,23 +462,6 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
 	/* Get the right irq */
 	kvmppc_xive_select_irq(state, &hw_num, &xd);
 
-	/*
-	 * See comment in xive_lock_and_mask() concerning masking
-	 * via firmware.
-	 */
-	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
-		xive_native_configure_irq(hw_num,
-				kvmppc_xive_vp(xive, state->act_server),
-				state->act_priority, state->number);
-		/* If an EOI is needed, do it here */
-		if (!state->old_p)
-			xive_vm_source_eoi(hw_num, xd);
-		/* If this is not an LSI, force a trigger */
-		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
-			xive_irq_trigger(xd);
-		goto bail;
-	}
-
 	/* Old Q set, set PQ to 11 */
 	if (state->old_q)
 		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
@@ -424,9 +424,7 @@ static void xive_irq_eoi(struct irq_data *d)
 }
 
 /*
- * Helper used to mask and unmask an interrupt source. This
- * is only called for normal interrupts that do not require
- * masking/unmasking via firmware.
+ * Helper used to mask and unmask an interrupt source.
  */
 static void xive_do_source_set_mask(struct xive_irq_data *xd,
 				    bool mask)
@@ -673,20 +671,6 @@ static void xive_irq_unmask(struct irq_data *d)
 	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
 
-	/*
-	 * This is a workaround for PCI LSI problems on P9, for
-	 * these, we call FW to set the mask. The problems might
-	 * be fixed by P9 DD2.0, if that is the case, firmware
-	 * will no longer set that flag.
-	 */
-	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
-		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
-
-		xive_ops->configure_irq(hw_irq,
-					get_hard_smp_processor_id(xd->target),
-					xive_irq_priority, d->irq);
-		return;
-	}
-
 	xive_do_source_set_mask(xd, false);
 }
@@ -696,20 +680,6 @@ static void xive_irq_mask(struct irq_data *d)
 	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
 
-	/*
-	 * This is a workaround for PCI LSI problems on P9, for
-	 * these, we call OPAL to set the mask. The problems might
-	 * be fixed by P9 DD2.0, if that is the case, firmware
-	 * will no longer set that flag.
-	 */
-	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
-		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
-
-		xive_ops->configure_irq(hw_irq,
-					get_hard_smp_processor_id(xd->target),
-					0xff, d->irq);
-		return;
-	}
-
 	xive_do_source_set_mask(xd, true);
 }
@@ -852,13 +822,6 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
 	int rc;
 	u8 pq;
 
-	/*
-	 * We only support this on interrupts that do not require
-	 * firmware calls for masking and unmasking
-	 */
-	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
-		return -EIO;
-
 	/*
 	 * This is called by KVM with state non-NULL for enabling
 	 * pass-through or NULL for disabling it
@@ -1304,7 +1267,6 @@ static const struct {
 } xive_irq_flags[] = {
 	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
 	{ XIVE_IRQ_FLAG_LSI, "LSI" },
-	{ XIVE_IRQ_FLAG_MASK_FW, "MASK_FW" },
 	{ XIVE_IRQ_FLAG_EOI_FW, "EOI_FW" },
 	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
 	{ XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" },
@@ -64,8 +64,6 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
 		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
 	if (opal_flags & OPAL_XIVE_IRQ_LSI)
 		data->flags |= XIVE_IRQ_FLAG_LSI;
-	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
-		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
 	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
 		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
 	data->eoi_page = be64_to_cpu(eoi_page);