Commit 2f86e45a authored by Thomas Gleixner

Merge tag 'irqchip-fixes-5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent

Pull irqchip fixes for 5.6, take #1 from Marc Zyngier:

 - Guarantee allocation of L2 vPE table for GICv4.1
 - Fix GICv4.1 VPROPBASER programming
 - Numerous GICv4.1 tidy-ups
 - Fix disabled GICv3 redistributor provisioning with ACPI
 - Kconfig cleanup for C-SKY
parents f9f21cea 5186a6cc
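As background for the "Guarantee allocation of L2 vPE table" item: the new allocate_vpe_l2_table() in the diff below bounds-checks a vPE ID against either a flat or a two-level (indirect) table described by GICR_VPROPBASER. The following is a minimal, standalone sketch of that index arithmetic, for illustration only; the helper names and example field values are invented here and are not part of the patch.

/*
 * Illustrative userspace sketch (not kernel code) of the vPE table
 * bounds/index math used by allocate_vpe_l2_table() in the diff below.
 * Field values are invented example inputs, not values read from a GIC.
 */
#include <stdbool.h>
#include <stdio.h>

#define LVL1_ENTRY_SIZE	8	/* bytes per level-1 descriptor */

static unsigned int ilog2_u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/*
 * psz: redistributor page size in bytes (4K, 16K or 64K)
 * esz: vPE table entry size in bytes ((ENTRY_SIZE field + 1) * 8)
 * npg: number of pages (SIZE field + 1)
 */
static bool vpe_id_fits(unsigned int id, unsigned long psz,
			unsigned long esz, unsigned long npg, bool indirect)
{
	unsigned long idx;

	if (!indirect)		/* single, flat table */
		return id < (npg * psz / esz);

	/* two-level: the level-1 index selects one level-2 page of entries */
	idx = id >> ilog2_u(psz / esz);
	return idx < (npg * psz / LVL1_ENTRY_SIZE);
}

int main(void)
{
	/* example: 64K pages, 16-byte entries, two level-1 pages, indirect */
	printf("vPE 9000 fits: %d\n", vpe_id_fits(9000, 65536, 16, 2, true));
	return 0;
}

The kernel code itself reads ENTRY_SIZE, PAGE_SIZE and SIZE out of GICR_VPROPBASER with FIELD_GET() and, in the indirect case, allocates and flushes the missing level-2 page before marking the level-1 descriptor valid.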
......@@ -326,16 +326,16 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
/*
* GITS_VPROPBASER - hi and lo bits may be accessed independently.
* GICR_VPROPBASER - hi and lo bits may be accessed independently.
*/
#define gits_read_vpropbaser(c) __gic_readq_nonatomic(c)
#define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
#define gicr_read_vpropbaser(c) __gic_readq_nonatomic(c)
#define gicr_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
/*
* GITS_VPENDBASER - the Valid bit must be cleared before changing
* GICR_VPENDBASER - the Valid bit must be cleared before changing
* anything else.
*/
static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
{
u32 tmp;
......@@ -352,7 +352,7 @@ static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
__gic_writeq_nonatomic(val, addr);
}
#define gits_read_vpendbaser(c) __gic_readq_nonatomic(c)
#define gicr_read_vpendbaser(c) __gic_readq_nonatomic(c)
static inline bool gic_prio_masking_enabled(void)
{
......
......@@ -140,11 +140,11 @@ static inline u32 gic_read_rpr(void)
#define gicr_write_pendbaser(v, c) writeq_relaxed(v, c)
#define gicr_read_pendbaser(c) readq_relaxed(c)
#define gits_write_vpropbaser(v, c) writeq_relaxed(v, c)
#define gits_read_vpropbaser(c) readq_relaxed(c)
#define gicr_write_vpropbaser(v, c) writeq_relaxed(v, c)
#define gicr_read_vpropbaser(c) readq_relaxed(c)
#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c)
#define gits_read_vpendbaser(c) readq_relaxed(c)
#define gicr_write_vpendbaser(v, c) writeq_relaxed(v, c)
#define gicr_read_vpendbaser(c) readq_relaxed(c)
static inline bool gic_prio_masking_enabled(void)
{
......
......@@ -438,7 +438,7 @@ config CSKY_MPINTC
help
Say yes here to enable C-SKY SMP interrupt controller driver used
for C-SKY SMP system.
In fact it's not mmio map in hw and it use ld/st to visit the
In fact it's not mmio map in hardware and it uses ld/st to visit the
controller's register inside CPU.
config CSKY_APB_INTC
......@@ -446,7 +446,7 @@ config CSKY_APB_INTC
depends on CSKY
help
Say yes here to enable C-SKY APB interrupt controller driver used
by C-SKY single core SOC system. It use mmio map apb-bus to visit
by C-SKY single core SOC system. It uses mmio map apb-bus to visit
the controller's register.
config IMX_IRQSTEER
......
......@@ -661,7 +661,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_INVALL);
its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
its_fixup_cmd(cmd);
......@@ -2376,6 +2376,8 @@ static u64 inherit_vpe_l1_table_from_its(void)
continue;
/* We have a winner! */
gic_data_rdist()->vpe_l1_base = its->tables[2].base;
val = GICR_VPROPBASER_4_1_VALID;
if (baser & GITS_BASER_INDIRECT)
val |= GICR_VPROPBASER_4_1_INDIRECT;
......@@ -2413,14 +2415,12 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
for_each_possible_cpu(cpu) {
void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
u32 tmp;
if (!base || cpu == smp_processor_id())
continue;
val = gic_read_typer(base + GICR_TYPER);
tmp = compute_common_aff(val);
if (tmp != aff)
if (aff != compute_common_aff(val))
continue;
/*
......@@ -2429,9 +2429,10 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
* ours wrt CommonLPIAff. Let's use its own VPROPBASER.
* Make sure we don't write the Z bit in that case.
*/
val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
val &= ~GICR_VPROPBASER_4_1_Z;
gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
return val;
......@@ -2440,6 +2441,72 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
return 0;
}
static bool allocate_vpe_l2_table(int cpu, u32 id)
{
void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
u64 val, gpsz, npg;
unsigned int psz, esz, idx;
struct page *page;
__le64 *table;
if (!gic_rdists->has_rvpeid)
return true;
val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
switch (gpsz) {
default:
WARN_ON(1);
/* fall through */
case GIC_PAGE_SIZE_4K:
psz = SZ_4K;
break;
case GIC_PAGE_SIZE_16K:
psz = SZ_16K;
break;
case GIC_PAGE_SIZE_64K:
psz = SZ_64K;
break;
}
/* Don't allow vpe_id that exceeds single, flat table limit */
if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
return (id < (npg * psz / (esz * SZ_8)));
/* Compute 1st level table index & check if that exceeds table limit */
idx = id >> ilog2(psz / (esz * SZ_8));
if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
return false;
table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
/* Allocate memory for 2nd level table */
if (!table[idx]) {
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
if (!page)
return false;
/* Flush Lvl2 table to PoC if hw doesn't support coherency */
if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
gic_flush_dcache_to_poc(page_address(page), psz);
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
/* Ensure updated table contents are visible to RD hardware */
dsb(sy);
}
return true;
}
static int allocate_vpe_l1_table(void)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
......@@ -2457,8 +2524,8 @@ static int allocate_vpe_l1_table(void)
* effect of making sure no doorbell will be generated and we can
* then safely clear VPROPBASER.Valid.
*/
if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
vlpi_base + GICR_VPENDBASER);
/*
......@@ -2481,8 +2548,8 @@ static int allocate_vpe_l1_table(void)
/* First probe the page size */
val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
......@@ -2531,7 +2598,7 @@ static int allocate_vpe_l1_table(void)
npg = 1;
}
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
/* Right, that's the number of CPU pages we need for L1 */
np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
......@@ -2542,7 +2609,7 @@ static int allocate_vpe_l1_table(void)
if (!page)
return -ENOMEM;
gic_data_rdist()->vpe_l1_page = page;
gic_data_rdist()->vpe_l1_base = page_address(page);
pa = virt_to_phys(page_address(page));
WARN_ON(!IS_ALIGNED(pa, psz));
......@@ -2553,7 +2620,7 @@ static int allocate_vpe_l1_table(void)
val |= GICR_VPROPBASER_4_1_VALID;
out:
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
......@@ -2660,14 +2727,14 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
bool clean;
u64 val;
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val &= ~GICR_VPENDBASER_Valid;
val &= ~clr;
val |= set;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
do {
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
clean = !(val & GICR_VPENDBASER_Dirty);
if (!clean) {
count--;
......@@ -2782,7 +2849,7 @@ static void its_cpu_init_lpis(void)
val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
smp_processor_id(), val);
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
/*
* Also clear Valid bit of GICR_VPENDBASER, in case some
......@@ -2790,7 +2857,6 @@ static void its_cpu_init_lpis(void)
* corrupting memory.
*/
val = its_clear_vpend_valid(vlpi_base, 0, 0);
WARN_ON(val & GICR_VPENDBASER_Dirty);
}
if (allocate_vpe_l1_table()) {
......@@ -2954,6 +3020,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
static bool its_alloc_vpe_table(u32 vpe_id)
{
struct its_node *its;
int cpu;
/*
* Make sure the L2 tables are allocated on *all* v4 ITSs. We
......@@ -2976,6 +3043,19 @@ static bool its_alloc_vpe_table(u32 vpe_id)
return false;
}
/* Non v4.1? No need to iterate RDs and go back early. */
if (!gic_rdists->has_rvpeid)
return true;
/*
* Make sure the L2 tables are allocated for all copies of
* the L1 table on *all* v4.1 RDs.
*/
for_each_possible_cpu(cpu) {
if (!allocate_vpe_l2_table(cpu, vpe_id))
return false;
}
return true;
}
......@@ -3443,7 +3523,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
val |= GICR_VPROPBASER_RaWb;
val |= GICR_VPROPBASER_InnerShareable;
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
......@@ -3461,7 +3541,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= GICR_VPENDBASER_PendingLast;
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
val |= GICR_VPENDBASER_Valid;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}
static void its_vpe_deschedule(struct its_vpe *vpe)
......@@ -3661,7 +3741,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}
static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
......
......@@ -1839,6 +1839,7 @@ static struct
struct redist_region *redist_regs;
u32 nr_redist_regions;
bool single_redist;
int enabled_rdists;
u32 maint_irq;
int maint_irq_mode;
phys_addr_t vcpu_base;
......@@ -1933,8 +1934,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
* If GICC is enabled and has valid gicr base address, then it means
* GICR base is presented via GICC
*/
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
acpi_data.enabled_rdists++;
return 0;
}
/*
* It's perfectly valid firmware can pass disabled GICC entry, driver
......@@ -1964,8 +1967,10 @@ static int __init gic_acpi_count_gicr_regions(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_match_gicc, 0);
if (count > 0)
if (count > 0) {
acpi_data.single_redist = true;
count = acpi_data.enabled_rdists;
}
return count;
}
......
......@@ -652,10 +652,10 @@ struct rdists {
struct {
void __iomem *rd_base;
struct page *pend_page;
struct page *vpe_l1_page;
phys_addr_t phys_base;
bool lpi_enabled;
cpumask_t *vpe_table_mask;
void *vpe_l1_base;
} __percpu *rdist;
phys_addr_t prop_table_pa;
void *prop_table_va;
......