Commit 04debf21 authored by Mark Greer, committed by Michael Ellerman

powerpc: Remove core support for Marvell mv64x60 hostbridges

There are no longer any platforms that use Marvell's mv64x60
hostbridges so remove the supporting kernel code.

CC: Dale Farnsworth <dale@farnsworth.org>
Signed-off-by: Mark Greer <mgreer@animalcreek.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 297f8314
@@ -28,9 +28,6 @@ obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
mv64x60-$(CONFIG_PCI) += mv64x60_pci.o
obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \
mv64x60_udbg.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MV64X60_H__
#define __MV64X60_H__
#include <linux/init.h>
extern void __init mv64x60_init_irq(void);
extern unsigned int mv64x60_get_irq(void);
extern void __init mv64x60_pci_init(void);
extern void __init mv64x60_init_early(void);
#endif /* __MV64X60_H__ */
/*
* PCI bus setup for Marvell mv64360/mv64460 host bridges (Discovery)
*
* Author: Dale Farnsworth <dale@farnsworth.org>
*
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/pci.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#define PCI_HEADER_TYPE_INVALID 0x7f /* Invalid PCI header type */
#ifdef CONFIG_SYSFS
/* 32-bit hex or dec stringified number + '\n' */
#define MV64X60_VAL_LEN_MAX 11
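/* e.g. the read handler below emits "0x%08x\n", which is exactly 11 bytes */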
#define MV64X60_PCICFG_CPCI_HOTSWAP 0x68
static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *phb;
u32 v;
if (off > 0)
return 0;
if (count < MV64X60_VAL_LEN_MAX)
return -EINVAL;
phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!phb)
return -ENODEV;
pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v);
pci_dev_put(phb);
return sprintf(buf, "0x%08x\n", v);
}
static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *phb;
u32 v;
if (off > 0)
return 0;
if (count <= 0)
return -EINVAL;
if (sscanf(buf, "%i", &v) != 1)
return -EINVAL;
phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!phb)
return -ENODEV;
pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v);
pci_dev_put(phb);
return count;
}
static const struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */
.attr = {
.name = "hs_reg",
.mode = 0644,
},
.size = MV64X60_VAL_LEN_MAX,
.read = mv64x60_hs_reg_read,
.write = mv64x60_hs_reg_write,
};
static int __init mv64x60_sysfs_init(void)
{
struct device_node *np;
struct platform_device *pdev;
const unsigned int *prop;
np = of_find_compatible_node(NULL, NULL, "marvell,mv64360");
if (!np)
return 0;
prop = of_get_property(np, "hs_reg_valid", NULL);
of_node_put(np);
pdev = platform_device_register_simple("marvell,mv64360", 0, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return sysfs_create_bin_file(&pdev->dev.kobj, &mv64x60_hs_reg_attr);
}
subsys_initcall(mv64x60_sysfs_init);
#endif /* CONFIG_SYSFS */
static void mv64x60_pci_fixup_early(struct pci_dev *dev)
{
/*
* Set the host bridge hdr_type to an invalid value so that
* pci_setup_device() will ignore the host bridge.
*/
dev->hdr_type = PCI_HEADER_TYPE_INVALID;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360,
mv64x60_pci_fixup_early);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64460,
mv64x60_pci_fixup_early);
static int __init mv64x60_add_bridge(struct device_node *dev)
{
int len;
struct pci_controller *hose;
struct resource rsrc;
const int *bus_range;
int primary;
memset(&rsrc, 0, sizeof(rsrc));
/* Fetch host bridge registers address */
if (of_address_to_resource(dev, 0, &rsrc)) {
printk(KERN_ERR "No PCI reg property in device tree\n");
return -ENODEV;
}
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int))
printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
" bus 0\n", dev);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
setup_indirect_pci(hose, rsrc.start, rsrc.start + 4, 0);
hose->self_busno = hose->first_busno;
printk(KERN_INFO "Found MV64x60 PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
(unsigned long long)rsrc.start, hose->first_busno,
hose->last_busno);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
primary = (hose->first_busno == 0);
pci_process_bridge_OF_ranges(hose, dev, primary);
return 0;
}
void __init mv64x60_pci_init(void)
{
struct device_node *np;
for_each_compatible_node(np, "pci", "marvell,mv64360-pci")
mv64x60_add_bridge(np);
}
/*
* Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery)
*
* Author: Dale Farnsworth <dale@farnsworth.org>
*
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include "mv64x60.h"
/* Interrupt Controller Interface Registers */
#define MV64X60_IC_MAIN_CAUSE_LO 0x0004
#define MV64X60_IC_MAIN_CAUSE_HI 0x000c
#define MV64X60_IC_CPU0_INTR_MASK_LO 0x0014
#define MV64X60_IC_CPU0_INTR_MASK_HI 0x001c
#define MV64X60_IC_CPU0_SELECT_CAUSE 0x0024
#define MV64X60_HIGH_GPP_GROUPS 0x0f000000
#define MV64X60_SELECT_CAUSE_HIGH 0x40000000
/* General Purpose Pins Controller Interface Registers */
#define MV64x60_GPP_INTR_CAUSE 0x0008
#define MV64x60_GPP_INTR_MASK 0x000c
#define MV64x60_LEVEL1_LOW 0
#define MV64x60_LEVEL1_HIGH 1
#define MV64x60_LEVEL1_GPP 2
#define MV64x60_LEVEL1_MASK 0x00000060
#define MV64x60_LEVEL1_OFFSET 5
#define MV64x60_LEVEL2_MASK 0x0000001f
#define MV64x60_NUM_IRQS 96
static DEFINE_SPINLOCK(mv64x60_lock);
static void __iomem *mv64x60_irq_reg_base;
static void __iomem *mv64x60_gpp_reg_base;
/*
* Interrupt Controller Handling
*
* The interrupt controller handles three groups of interrupts:
* main low: IRQ0-IRQ31
* main high: IRQ32-IRQ63
* gpp: IRQ64-IRQ95
*
* This code handles interrupts in two levels. Level 1 selects the
* interrupt group, and level 2 selects an IRQ within that group.
* Each group has its own irq_chip structure.
*/
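/*
 * The hwirq number encodes both levels: bits 6:5 (MV64x60_LEVEL1_MASK)
 * select the group and bits 4:0 (MV64x60_LEVEL2_MASK) select the source
 * within it. For example, hwirq 35 decodes to level1 = 1 (main high)
 * and level2 = 3.
 */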
static u32 mv64x60_cached_low_mask;
static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
static u32 mv64x60_cached_gpp_mask;
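/*
 * The cached_*_mask variables shadow the hardware mask registers, so the
 * mask/unmask handlers only read-modify-write the cached copy under
 * mv64x60_lock; the discarded in_le32() after each out_le32() reads the
 * register back so the (posted) write is pushed out to the bridge.
 */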
static struct irq_domain *mv64x60_irq_host;
/*
* mv64x60_chip_low functions
*/
static void mv64x60_mask_low(struct irq_data *d)
{
int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
mv64x60_cached_low_mask &= ~(1 << level2);
out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
mv64x60_cached_low_mask);
spin_unlock_irqrestore(&mv64x60_lock, flags);
(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}
static void mv64x60_unmask_low(struct irq_data *d)
{
int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
mv64x60_cached_low_mask |= 1 << level2;
out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
mv64x60_cached_low_mask);
spin_unlock_irqrestore(&mv64x60_lock, flags);
(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}
static struct irq_chip mv64x60_chip_low = {
.name = "mv64x60_low",
.irq_mask = mv64x60_mask_low,
.irq_mask_ack = mv64x60_mask_low,
.irq_unmask = mv64x60_unmask_low,
};
/*
* mv64x60_chip_high functions
*/
static void mv64x60_mask_high(struct irq_data *d)
{
int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
mv64x60_cached_high_mask &= ~(1 << level2);
out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
mv64x60_cached_high_mask);
spin_unlock_irqrestore(&mv64x60_lock, flags);
(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}
static void mv64x60_unmask_high(struct irq_data *d)
{
int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
mv64x60_cached_high_mask |= 1 << level2;
out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
mv64x60_cached_high_mask);
spin_unlock_irqrestore(&mv64x60_lock, flags);
(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}
static struct irq_chip mv64x60_chip_high = {
.name = "mv64x60_high",
.irq_mask = mv64x60_mask_high,
.irq_mask_ack = mv64x60_mask_high,
.irq_unmask = mv64x60_unmask_high,
};
/*
* mv64x60_chip_gpp functions
*/
static void mv64x60_mask_gpp(struct irq_data *d)
{
int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
mv64x60_cached_gpp_mask &= ~(1 << level2);
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
mv64x60_cached_gpp_mask);
spin_unlock_irqrestore(&mv64x60_lock, flags);
(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}
static void mv64x60_mask_ack_gpp(struct irq_data *d)
{
int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
mv64x60_cached_gpp_mask &= ~(1 << level2);
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
mv64x60_cached_gpp_mask);
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE,
~(1 << level2));
spin_unlock_irqrestore(&mv64x60_lock, flags);
(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
}
static void mv64x60_unmask_gpp(struct irq_data *d)
{
int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
mv64x60_cached_gpp_mask |= 1 << level2;
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
mv64x60_cached_gpp_mask);
spin_unlock_irqrestore(&mv64x60_lock, flags);
(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}
static struct irq_chip mv64x60_chip_gpp = {
.name = "mv64x60_gpp",
.irq_mask = mv64x60_mask_gpp,
.irq_mask_ack = mv64x60_mask_ack_gpp,
.irq_unmask = mv64x60_unmask_gpp,
};
/*
* mv64x60_host_ops functions
*/
static struct irq_chip *mv64x60_chips[] = {
[MV64x60_LEVEL1_LOW] = &mv64x60_chip_low,
[MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high,
[MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp,
};
static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
int level1;
irq_set_status_flags(virq, IRQ_LEVEL);
level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
BUG_ON(level1 > MV64x60_LEVEL1_GPP);
irq_set_chip_and_handler(virq, mv64x60_chips[level1],
handle_level_irq);
return 0;
}
static const struct irq_domain_ops mv64x60_host_ops = {
.map = mv64x60_host_map,
};
/*
* Global functions
*/
void __init mv64x60_init_irq(void)
{
struct device_node *np;
phys_addr_t paddr;
unsigned int size;
const unsigned int *reg;
unsigned long flags;
np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
reg = of_get_property(np, "reg", &size);
paddr = of_translate_address(np, reg);
mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
of_node_put(np);
np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
reg = of_get_property(np, "reg", &size);
paddr = of_translate_address(np, reg);
mv64x60_irq_reg_base = ioremap(paddr, reg[1]);
mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
&mv64x60_host_ops, NULL);
spin_lock_irqsave(&mv64x60_lock, flags);
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
mv64x60_cached_gpp_mask);
out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
mv64x60_cached_low_mask);
out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
mv64x60_cached_high_mask);
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
spin_unlock_irqrestore(&mv64x60_lock, flags);
}
unsigned int mv64x60_get_irq(void)
{
u32 cause;
int level1;
irq_hw_number_t hwirq;
int virq = 0;
cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
if (cause & MV64X60_SELECT_CAUSE_HIGH) {
cause &= mv64x60_cached_high_mask;
level1 = MV64x60_LEVEL1_HIGH;
if (cause & MV64X60_HIGH_GPP_GROUPS) {
cause = in_le32(mv64x60_gpp_reg_base +
MV64x60_GPP_INTR_CAUSE);
cause &= mv64x60_cached_gpp_mask;
level1 = MV64x60_LEVEL1_GPP;
}
} else {
cause &= mv64x60_cached_low_mask;
level1 = MV64x60_LEVEL1_LOW;
}
if (cause) {
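/* __ilog2(cause) gives the bit index of the highest pending source in
 * the selected group; that index becomes level 2 of the hwirq. */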
hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause);
virq = irq_linear_revmap(mv64x60_irq_host, hwirq);
}
return virq;
}
/*
* udbg serial input/output routines for the Marvell MV64x60 (Discovery).
*
* Author: Dale Farnsworth <dale@farnsworth.org>
*
* 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/mv64x60.h>
#define MPSC_0_CR1_OFFSET 0x000c
#define MPSC_0_CR2_OFFSET 0x0010
#define MPSC_CHR_2_TCS (1 << 9)
#define MPSC_0_CHR_10_OFFSET 0x0030
#define MPSC_INTR_CAUSE_OFF_0 0x0004
#define MPSC_INTR_CAUSE_OFF_1 0x000c
#define MPSC_INTR_CAUSE_RCC (1<<6)
static void __iomem *mpsc_base;
static void __iomem *mpsc_intr_cause;
static void mv64x60_udbg_putc(char c)
{
if (c == '\n')
mv64x60_udbg_putc('\r');
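/*
 * Wait for any previous TCS-triggered transmit to complete, write the
 * character, then set TCS again to start sending it.
 */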
while(in_le32(mpsc_base + MPSC_0_CR2_OFFSET) & MPSC_CHR_2_TCS)
;
out_le32(mpsc_base + MPSC_0_CR1_OFFSET, c);
out_le32(mpsc_base + MPSC_0_CR2_OFFSET, MPSC_CHR_2_TCS);
}
static int mv64x60_udbg_testc(void)
{
return (in_le32(mpsc_intr_cause) & MPSC_INTR_CAUSE_RCC) != 0;
}
static int mv64x60_udbg_getc(void)
{
int cause = 0;
int c;
while (!mv64x60_udbg_testc())
;
c = in_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2);
out_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2, c);
out_le32(mpsc_intr_cause, cause & ~MPSC_INTR_CAUSE_RCC);
return c;
}
static int mv64x60_udbg_getc_poll(void)
{
if (!mv64x60_udbg_testc())
return -1;
return mv64x60_udbg_getc();
}
static void mv64x60_udbg_init(void)
{
struct device_node *np, *mpscintr, *stdout = NULL;
const char *path;
const phandle *ph;
struct resource r[2];
const int *block_index;
int intr_cause_offset;
int err;
path = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (!path)
return;
stdout = of_find_node_by_path(path);
if (!stdout)
return;
for_each_compatible_node(np, NULL, "marvell,mv64360-mpsc") {
if (np == stdout)
break;
}
of_node_put(stdout);
if (!np)
return;
block_index = of_get_property(np, "cell-index", NULL);
if (!block_index)
goto error;
switch (*block_index) {
case 0:
intr_cause_offset = MPSC_INTR_CAUSE_OFF_0;
break;
case 1:
intr_cause_offset = MPSC_INTR_CAUSE_OFF_1;
break;
default:
goto error;
}
err = of_address_to_resource(np, 0, &r[0]);
if (err)
goto error;
ph = of_get_property(np, "mpscintr", NULL);
mpscintr = of_find_node_by_phandle(*ph);
if (!mpscintr)
goto error;
err = of_address_to_resource(mpscintr, 0, &r[1]);
of_node_put(mpscintr);
if (err)
goto error;
of_node_put(np);
mpsc_base = ioremap(r[0].start, resource_size(&r[0]));
if (!mpsc_base)
return;
mpsc_intr_cause = ioremap(r[1].start, resource_size(&r[1]));
if (!mpsc_intr_cause) {
iounmap(mpsc_base);
return;
}
mpsc_intr_cause += intr_cause_offset;
udbg_putc = mv64x60_udbg_putc;
udbg_getc = mv64x60_udbg_getc;
udbg_getc_poll = mv64x60_udbg_getc_poll;
return;
error:
of_node_put(np);
}
void mv64x60_init_early(void)
{
mv64x60_udbg_init();
}