Commit 39b8d525 authored by Ralf Baechle

[MIPS] Add support for MIPS CMP platform.

Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 30840244
@@ -221,6 +221,7 @@ config MIPS_MALTA
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
select IRQ_CPU
select IRQ_GIC
select HW_HAS_PCI
select I8253
select I8259
@@ -840,6 +841,9 @@ config MIPS_NILE4
config MIPS_DISABLE_OBSOLETE_IDE
bool
config SYNC_R4K
bool
config NO_IOPORT
def_bool n
@@ -909,6 +913,9 @@ config IRQ_TXX9
config IRQ_GT641XX
bool
config IRQ_GIC
bool
config MIPS_BOARDS_GEN
bool
@@ -1811,6 +1818,17 @@ config NR_CPUS
performance should round up your number of processors to the next
power of two.
config MIPS_CMP
bool "MIPS CMP framework support"
depends on SMP
select SYNC_R4K
select SYS_SUPPORTS_SCHED_SMT
select WEAK_ORDERING
default n
help
This is a placeholder option for the GCMP work. It will need to
be handled differently...
source "kernel/time/Kconfig"
#
......
@@ -16,6 +16,7 @@ obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
irix5sys.o sysirix.o
@@ -50,6 +51,7 @@ obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_CPU_MIPSR2) += spram.o
obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
obj-$(CONFIG_IRQ_GIC) += irq-gic.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
......
@@ -169,6 +169,7 @@ static inline void check_wait(void)
case CPU_24K:
case CPU_34K:
case CPU_1004K:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
@@ -717,6 +718,9 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
case PRID_IMP_74K:
c->cputype = CPU_74K;
break;
case PRID_IMP_1004K:
c->cputype = CPU_1004K;
break;
}
spram_config();
@@ -884,6 +888,7 @@ static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
case CPU_24K: name = "MIPS 24K"; break;
case CPU_25KF: name = "MIPS 25Kf"; break;
case CPU_34K: name = "MIPS 34K"; break;
case CPU_1004K: name = "MIPS 1004K"; break;
case CPU_74K: name = "MIPS 74K"; break;
case CPU_VR4111: name = "NEC VR4111"; break;
case CPU_VR4121: name = "NEC VR4121"; break;
......
#undef DEBUG
#include <linux/bitmap.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/irq.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
static unsigned long _gic_base;
static unsigned int _irqbase, _mapsize, numvpes, numintrs;
static struct gic_intr_map *_intrmap;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
#define gic_wedgeb2bok 0	/* Can GIC handle back-to-back writes to the wedge register? */
#if gic_wedgeb2bok == 0
static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
#endif
void gic_send_ipi(unsigned int intr)
{
#if gic_wedgeb2bok == 0
unsigned long flags;
#endif
pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
read_c0_status());
if (!gic_wedgeb2bok)
spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
if (!gic_wedgeb2bok) {
(void) GIC_REG(SHARED, GIC_SH_CONFIG);
spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
}
}
/* This is Malta specific and needs to be exported */
static void vpe_local_setup(unsigned int numvpes)
{
int i;
unsigned long timer_interrupt = 5, perf_interrupt = 5;
unsigned int vpe_ctl;
/*
* Setup the default performance counter timer interrupts
* for all VPEs
*/
for (i = 0; i < numvpes; i++) {
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
/* Are Interrupts locally routable? */
GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
GIC_MAP_TO_PIN_MSK | timer_interrupt);
if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
GIC_MAP_TO_PIN_MSK | perf_interrupt);
}
}
unsigned int gic_get_int(void)
{
unsigned int i;
unsigned long *pending, *intrmask, *pcpu_mask;
unsigned long *pending_abs, *intrmask_abs;
/* Get per-cpu bitmaps */
pending = pending_regs[smp_processor_id()].pending;
intrmask = intrmask_regs[smp_processor_id()].intrmask;
pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
GIC_SH_PEND_31_0_OFS);
intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
GIC_SH_MASK_31_0_OFS);
for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
GICREAD(*pending_abs, pending[i]);
GICREAD(*intrmask_abs, intrmask[i]);
pending_abs++;
intrmask_abs++;
}
bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
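/* find_first_bit() returns GIC_NUM_INTRS if no routed interrupt is pending for this CPU */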
i = find_first_bit(pending, GIC_NUM_INTRS);
pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);
return i;
}
static unsigned int gic_irq_startup(unsigned int irq)
{
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
1 << (irq % 32));
return 0;
}
static void gic_irq_ack(unsigned int irq)
{
#if gic_wedgeb2bok == 0
unsigned long flags;
#endif
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
1 << (irq % 32));
if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
if (!gic_wedgeb2bok)
spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
if (!gic_wedgeb2bok) {
(void) GIC_REG(SHARED, GIC_SH_CONFIG);
spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
}
}
}
static void gic_mask_irq(unsigned int irq)
{
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
1 << (irq % 32));
}
static void gic_unmask_irq(unsigned int irq)
{
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
1 << (irq % 32));
}
#ifdef CONFIG_SMP
static DEFINE_SPINLOCK(gic_lock);
static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
cpumask_t tmp = CPU_MASK_NONE;
unsigned long flags;
int i;
pr_debug("%s called\n", __func__);
irq -= _irqbase;
cpus_and(tmp, cpumask, cpu_online_map);
if (cpus_empty(tmp))
return;
/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
/*
* FIXME: assumption that _intrmap is ordered and has no holes
*/
/* Update the intr_map */
_intrmap[irq].cpunum = first_cpu(tmp);
/* Update the pcpu_masks */
for (i = 0; i < NR_CPUS; i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
irq_desc[irq].affinity = cpumask;
spin_unlock_irqrestore(&gic_lock, flags);
}
#endif
static struct irq_chip gic_irq_controller = {
.name = "MIPS GIC",
.startup = gic_irq_startup,
.ack = gic_irq_ack,
.mask = gic_mask_irq,
.mask_ack = gic_mask_irq,
.unmask = gic_unmask_irq,
.eoi = gic_unmask_irq,
#ifdef CONFIG_SMP
.set_affinity = gic_set_affinity,
#endif
};
static void __init setup_intr(unsigned int intr, unsigned int cpu,
unsigned int pin, unsigned int polarity, unsigned int trigtype)
{
/* Setup Intr to Pin mapping */
if (pin & GIC_MAP_TO_NMI_MSK) {
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
/* FIXME: hack to route NMI to all cpu's */
for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
GICWRITE(GIC_REG_ADDR(SHARED,
GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
0xffffffff);
}
} else {
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
GIC_MAP_TO_PIN_MSK | pin);
/* Setup Intr to CPU mapping */
GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
}
/* Setup Intr Polarity */
GIC_SET_POLARITY(intr, polarity);
/* Setup Intr Trigger Type */
GIC_SET_TRIGGER(intr, trigtype);
/* Init Intr Masks */
GIC_SET_INTR_MASK(intr, 0);
}
static void __init gic_basic_init(void)
{
unsigned int i, cpu;
/* Setup defaults */
for (i = 0; i < GIC_NUM_INTRS; i++) {
GIC_SET_POLARITY(i, GIC_POL_POS);
GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
GIC_SET_INTR_MASK(i, 0);
}
/* Setup specifics */
for (i = 0; i < _mapsize; i++) {
cpu = _intrmap[i].cpunum;
if (cpu == X)
continue;
setup_intr(_intrmap[i].intrnum,
_intrmap[i].cpunum,
_intrmap[i].pin,
_intrmap[i].polarity,
_intrmap[i].trigtype);
/* Initialise per-cpu Interrupt software masks */
if (_intrmap[i].ipiflag)
set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
}
vpe_local_setup(numvpes);
for (i = _irqbase; i < (_irqbase + numintrs); i++)
set_irq_chip(i, &gic_irq_controller);
}
void __init gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size,
struct gic_intr_map *intr_map, unsigned int intr_map_size,
unsigned int irqbase)
{
unsigned int gicconfig;
_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
gic_addrspace_size);
_irqbase = irqbase;
_intrmap = intr_map;
_mapsize = intr_map_size;
GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
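/* The NUMINTRS field encodes (number of external interrupts / 8) - 1, hence the scaling below */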
numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
GIC_SH_CONFIG_NUMINTRS_SHF;
numintrs = ((numintrs + 1) * 8);
numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
GIC_SH_CONFIG_NUMVPES_SHF;
pr_debug("%s called\n", __func__);
gic_basic_init();
}
/*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Chris Dearman (chris@mips.com)
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
/*
* Crude manipulation of the CPU masks to control which
* CPUs are brought online during initialisation
*
* Beware... this needs to be called after CPU discovery
* but before CPU bringup
*/
static int __init allowcpus(char *str)
{
cpumask_t cpu_allow_map;
char buf[256];
int len;
cpus_clear(cpu_allow_map);
if (cpulist_parse(str, cpu_allow_map) == 0) {
cpu_set(0, cpu_allow_map);
cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
buf[len] = '\0';
pr_debug("Allowable CPUs: %s\n", buf);
return 1;
} else
return 0;
}
__setup("allowcpus=", allowcpus);
static void ipi_call_function(unsigned int cpu)
{
unsigned int action = 0;
pr_debug("CPU%d: %s cpu %d status %08x\n",
smp_processor_id(), __func__, cpu, read_c0_status());
switch (cpu) {
case 0:
action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
break;
case 1:
action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
break;
case 2:
action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
break;
case 3:
action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
break;
}
gic_send_ipi(action);
}
static void ipi_resched(unsigned int cpu)
{
unsigned int action = 0;
pr_debug("CPU%d: %s cpu %d status %08x\n",
smp_processor_id(), __func__, cpu, read_c0_status());
switch (cpu) {
case 0:
action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
break;
case 1:
action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
break;
case 2:
action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
break;
case 3:
action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
break;
}
gic_send_ipi(action);
}
/*
* FIXME: This isn't restricted to CMP
* The SMVP kernel could use GIC interrupts if available
*/
void cmp_send_ipi_single(int cpu, unsigned int action)
{
unsigned long flags;
local_irq_save(flags);
switch (action) {
case SMP_CALL_FUNCTION:
ipi_call_function(cpu);
break;
case SMP_RESCHEDULE_YOURSELF:
ipi_resched(cpu);
break;
}
local_irq_restore(flags);
}
static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
{
unsigned int i;
for_each_cpu_mask(i, mask)
cmp_send_ipi_single(i, action);
}
static void cmp_init_secondary(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
/* Assume GIC is present */
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
STATUSF_IP7);
/* Enable per-cpu interrupts: platform specific */
c->core = (read_c0_ebase() >> 1) & 0xff;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
#endif
}
static void cmp_smp_finish(void)
{
pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
/* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
}
static void cmp_cpus_done(void)
{
pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it running
* smp_bootstrap is the place to resume from
* __KSTK_TOS(idle) is apparently the stack pointer
* (unsigned long)idle->thread_info the gp
*/
static void cmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
unsigned long pc = (unsigned long)&smp_bootstrap;
unsigned long a0 = 0;
pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
__func__, cpu);
#if 0
/* Needed? */
flush_icache_range((unsigned long)gp,
(unsigned long)(gp + sizeof(struct thread_info)));
#endif
amon_cpu_start(cpu, pc, sp, gp, a0);
}
/*
* Common setup before any secondaries are started
*/
void __init cmp_smp_setup(void)
{
int i;
int ncpu = 0;
pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
for (i = 1; i < NR_CPUS; i++) {
if (amon_cpu_avail(i)) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++ncpu;
__cpu_logical_map[ncpu] = i;
}
}
if (cpu_has_mipsmt) {
unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
smp_num_siblings = nvpe;
}
pr_info("Detected %i available secondary CPU(s)\n", ncpu);
}
void __init cmp_prepare_cpus(unsigned int max_cpus)
{
pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
smp_processor_id(), __func__, max_cpus);
/*
* FIXME: some of these options are per-system, some per-core and
* some per-cpu
*/
mips_mt_set_cpuoptions();
}
struct plat_smp_ops cmp_smp_ops = {
.send_ipi_single = cmp_send_ipi_single,
.send_ipi_mask = cmp_send_ipi_mask,
.init_secondary = cmp_init_secondary,
.smp_finish = cmp_smp_finish,
.cpus_done = cmp_cpus_done,
.boot_secondary = cmp_boot_secondary,
.smp_setup = cmp_smp_setup,
.prepare_cpus = cmp_prepare_cpus,
};
@@ -36,63 +36,7 @@
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#define MIPS_CPU_IPI_RESCHED_IRQ 0
#define MIPS_CPU_IPI_CALL_IRQ 1
static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
#if 0
static void dump_mtregisters(int vpe, int tc)
{
printk("vpe %d tc %d\n", vpe, tc);
settc(tc);
printk(" c0 status 0x%lx\n", read_vpe_c0_status());
printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus());
printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart());
printk(" tcbind 0x%lx\n", read_tc_c0_tcbind());
printk(" tchalt 0x%lx\n", read_tc_c0_tchalt());
}
#endif
static void ipi_resched_dispatch(void)
{
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}
static void ipi_call_dispatch(void)
{
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
smp_call_function_interrupt();
return IRQ_HANDLED;
}
static struct irqaction irq_resched = {
.handler = ipi_resched_interrupt,
.flags = IRQF_DISABLED|IRQF_PERCPU,
.name = "IPI_resched"
};
static struct irqaction irq_call = {
.handler = ipi_call_interrupt,
.flags = IRQF_DISABLED|IRQF_PERCPU,
.name = "IPI_call"
};
static void __init smp_copy_vpe_config(void)
static void __init smvp_copy_vpe_config(void)
{
write_vpe_c0_status(
(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
@@ -109,7 +53,7 @@ static void __init smp_copy_vpe_config(void)
write_vpe_c0_count(read_c0_count());
}
static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
unsigned int ncpu)
{
if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
@@ -135,12 +79,12 @@ static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
if (tc != 0)
smp_copy_vpe_config();
smvp_copy_vpe_config();
return ncpu;
}
static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
unsigned long tmp;
@@ -207,15 +151,20 @@ static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
static void __cpuinit vsmp_init_secondary(void)
{
/* Enable per-cpu interrupts */
extern int gic_present;
/* This is Malta specific: IPI, performance and timer interrupts */
write_c0_status((read_c0_status() & ~ST0_IM ) |
(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
if (gic_present)
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
STATUSF_IP6 | STATUSF_IP7);
else
change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
STATUSF_IP6 | STATUSF_IP7);
}
static void __cpuinit vsmp_smp_finish(void)
{
/* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
@@ -276,7 +225,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
/*
* Common setup before any secondaries are started
* Make sure all CPU's are in a sensible state before we boot any of the
* secondarys
* secondaries
*/
static void __init vsmp_smp_setup(void)
{
@@ -309,8 +258,8 @@ static void __init vsmp_smp_setup(void)
for (tc = 0; tc <= ntc; tc++) {
settc(tc);
smp_tc_init(tc, mvpconf0);
ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
smvp_tc_init(tc, mvpconf0);
ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
}
/* Release config state */
@@ -324,21 +273,6 @@ static void __init vsmp_smp_setup(void)
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
mips_mt_set_cpuoptions();
/* set up ipi interrupts */
if (cpu_has_vint) {
set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}
cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);
set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}
struct plat_smp_ops vsmp_smp_ops = {
......
@@ -35,6 +35,7 @@
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
@@ -125,6 +126,8 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_set(cpu, cpu_callin_map);
synchronise_count_slave();
cpu_idle();
}
@@ -287,6 +290,7 @@ void smp_send_stop(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
mp_ops->cpus_done();
synchronise_count_master();
}
/* called from main before smp_init() */
......
@@ -331,7 +331,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
/* In general, all TCs should have the same cpu_data indications */
memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
if (cpu_data[0].cputype == CPU_34K)
if (cpu_data[0].cputype == CPU_34K ||
cpu_data[0].cputype == CPU_1004K)
cpu_data[cpu].options &= ~MIPS_CPU_FPU;
cpu_data[cpu].vpe_id = vpe;
cpu_data[cpu].tc_id = tc;
......
/*
* Count register synchronisation.
*
* All CPUs will have their count registers synchronised to the CPU0 expirelo
* value. This can cause a small timewarp for CPU0. All other CPUs should
* not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling
* interrupts...)
*
* FIXME: broken for SMTC
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <asm/r4k-timer.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
#include <linux/cpumask.h>
#include <asm/mipsregs.h>
static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 5
void __init synchronise_count_master(void)
{
int i;
unsigned long flags;
unsigned int initcount;
int nslaves;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC needs to synchronise per VPE, not per CPU
* ignore for now
*/
return;
#endif
pr_info("Checking COUNT synchronization across %u CPUs: ",
num_online_cpus());
local_irq_save(flags);
/*
* Notify the slaves that it's time to start
*/
atomic_set(&count_start_flag, 1);
smp_wmb();
/* Count will be initialised to expirelo for all CPUs */
initcount = expirelo;
/*
* We loop a few times to get a primed instruction cache,
* then the last pass is more or less synchronised and
* the master and slaves each set their cycle counters to a known
* value all at once. This reduces the chance of having random offsets
* between the processors, and guarantees that the maximum
* delay between the cycle counters is never bigger than
* the latency of information-passing (cachelines) between
* two CPUs.
*/
nslaves = num_online_cpus()-1;
for (i = 0; i < NR_LOOPS; i++) {
/* slaves loop on '!= ncpus' */
while (atomic_read(&count_count_start) != nslaves)
mb();
atomic_set(&count_count_stop, 0);
smp_wmb();
/* this lets the slaves write their count register */
atomic_inc(&count_count_start);
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
write_c0_count(initcount);
/*
* Wait for all slaves to leave the synchronization point:
*/
while (atomic_read(&count_count_stop) != nslaves)
mb();
atomic_set(&count_count_start, 0);
smp_wmb();
atomic_inc(&count_count_stop);
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
local_irq_restore(flags);
/*
* i386 code reported the skew here, but the
* count registers were almost certainly out of sync
* so no point in alarming people
*/
printk("done.\n");
}
void __init synchronise_count_slave(void)
{
int i;
unsigned long flags;
unsigned int initcount;
int ncpus;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC needs to synchronise per VPE, not per CPU
* ignore for now
*/
return;
#endif
local_irq_save(flags);
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready
*/
while (!atomic_read(&count_start_flag))
mb();
/* Count will be initialised to expirelo for all CPUs */
initcount = expirelo;
ncpus = num_online_cpus();
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&count_count_start);
while (atomic_read(&count_count_start) != ncpus)
mb();
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
write_c0_count(initcount);
atomic_inc(&count_count_stop);
while (atomic_read(&count_count_stop) != ncpus)
mb();
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
local_irq_restore(flags);
}
#undef NR_LOOPS
#endif
@@ -22,6 +22,7 @@
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -80,19 +81,22 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr))
print_ip_sym(addr);
#define IS_KVA01(a) ((((unsigned int)a) & 0xc0000000) == 0x80000000)
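/* True when the pointer lies in KSEG0/KSEG1 (0x80000000 - 0xbfffffff) */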
if (IS_KVA01(sp)) {
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr))
print_ip_sym(addr);
}
printk("\n");
}
printk("\n");
}
#ifdef CONFIG_KALLSYMS
@@ -192,16 +196,19 @@ EXPORT_SYMBOL(dump_stack);
static void show_code(unsigned int __user *pc)
{
long i;
unsigned short __user *pc16 = NULL;
printk("\nCode:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (__get_user(insn, pc + i)) {
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
printk(" (Bad address in epc)\n");
break;
}
printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
}
@@ -311,10 +318,21 @@ void show_regs(struct pt_regs *regs)
void show_registers(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
__show_regs(regs);
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
current->comm, task_pid_nr(current), current_thread_info(), current);
printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
current->comm, current->pid, current_thread_info(), current,
field, current_thread_info()->tp_value);
if (cpu_has_userlocal) {
unsigned long tls;
tls = read_c0_userlocal();
if (tls != current_thread_info()->tp_value)
printk("*HwTLS: %0*lx\n", field, tls);
}
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
@@ -985,6 +1003,21 @@ asmlinkage void do_reserved(struct pt_regs *regs)
(regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
l1parity = 0;
return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
l2parity = 0;
return 1;
}
__setup("nol2par", nol2parity);
/*
* Some MIPS CPUs can enable/disable for cache parity detection, but do
* it different ways.
@@ -994,6 +1027,62 @@ static inline void parity_protection_init(void)
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
errctl = read_c0_ecc();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
/* probe L1 parity support */
write_c0_ecc(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
l1parity_present = (read_c0_ecc() & ERRCTL_PE);
/* probe L2 parity support */
write_c0_ecc(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
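/*
 * Note: when both levels implement parity and L1 parity is enabled,
 * a set L2P bit disables L2 parity, hence the XOR here and again
 * before the readback report below.
 */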
if (l1parity_present && l2parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
if (l1parity ^ l2parity)
errctl |= ERRCTL_L2P;
} else if (l1parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
} else if (l2parity_present) {
if (l2parity)
errctl |= ERRCTL_L2P;
} else {
/* No parity available */
}
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
write_c0_ecc(errctl);
back_to_back_c0_hazard();
errctl = read_c0_ecc();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
printk(KERN_INFO "Cache parity protection %sabled\n",
(errctl & ERRCTL_PE) ? "en" : "dis");
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
printk(KERN_INFO "L2 cache parity protection %sabled\n",
(errctl & ERRCTL_L2P) ? "en" : "dis");
}
}
break;
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
@@ -1353,7 +1442,6 @@ void __cpuinit per_cpu_trap_init(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_mips_r2) {
unsigned int enable = 0x0000000f;
@@ -1362,7 +1450,6 @@ void __cpuinit per_cpu_trap_init(void)
write_c0_hwrena(enable);
}
#endif
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
......
@@ -20,6 +20,7 @@
obj-y := reset.o display.o init.o memory.o \
cmdline.o time.o
obj-y += amon.o
obj-$(CONFIG_EARLY_PRINTK) += console.o
obj-$(CONFIG_PCI) += pci.o
......
/*
* Copyright (C) 2007 MIPS Technologies, Inc.
* All rights reserved.
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Arbitrary Monitor interface
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm-mips/addrspace.h>
#include <asm-mips/mips-boards/launch.h>
#include <asm-mips/mipsmtregs.h>
int amon_cpu_avail(int cpu)
{
struct cpulaunch *launch = (struct cpulaunch *)KSEG0ADDR(CPULAUNCH);
if (cpu < 0 || cpu >= NCPULAUNCH) {
pr_debug("avail: cpu%d is out of range\n", cpu);
return 0;
}
launch += cpu;
if (!(launch->flags & LAUNCH_FREADY)) {
pr_debug("avail: cpu%d is not ready\n", cpu);
return 0;
}
if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) {
pr_debug("avail: too late.. cpu%d is already gone\n", cpu);
return 0;
}
return 1;
}
void amon_cpu_start(int cpu,
unsigned long pc, unsigned long sp,
unsigned long gp, unsigned long a0)
{
volatile struct cpulaunch *launch =
(struct cpulaunch *)KSEG0ADDR(CPULAUNCH);
if (!amon_cpu_avail(cpu))
return;
if (cpu == smp_processor_id()) {
pr_debug("launch: I am cpu%d!\n", cpu);
return;
}
launch += cpu;
pr_debug("launch: starting cpu%d\n", cpu);
launch->pc = pc;
launch->gp = gp;
launch->sp = sp;
launch->a0 = a0;
/* Make sure target sees parameters before the go bit */
smp_mb();
launch->flags |= LAUNCH_FGO;
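/* Busy-wait for the secondary to acknowledge by setting LAUNCH_FGONE */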
while ((launch->flags & LAUNCH_FGONE) == 0)
;
pr_debug("launch: cpu%d gone!\n", cpu);
}
@@ -424,6 +424,9 @@ void __init prom_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
console_config();
#endif
#ifdef CONFIG_MIPS_CMP
register_smp_ops(&cmp_smp_ops);
#endif
#ifdef CONFIG_MIPS_MT_SMP
register_smp_ops(&vsmp_smp_ops);
#endif
......
@@ -55,16 +55,36 @@
unsigned long cpu_khz;
static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
extern int cp0_perfcount_irq;
DEFINE_PER_CPU(unsigned int, tickcount);
#define tickcount_this_cpu __get_cpu_var(tickcount)
static unsigned long ledbitmask;
static void mips_timer_dispatch(void)
{
#if defined(CONFIG_MIPS_MALTA) || defined(CONFIG_MIPS_ATLAS)
/*
* Yes, this is very tacky, won't work as expected with SMTC and
* dyntick will break it,
* but it gives me a nice warm feeling during debug
*/
#define LEDBAR 0xbf000408
if (tickcount_this_cpu++ >= HZ) {
tickcount_this_cpu = 0;
change_bit(smp_processor_id(), &ledbitmask);
smp_wmb(); /* Make sure everyone else sees the change */
/* This will pick up any recent changes made by other CPU's */
*(unsigned int *)LEDBAR = ledbitmask;
}
#endif
do_IRQ(mips_cpu_timer_irq);
}
static void mips_perf_dispatch(void)
{
do_IRQ(cp0_perfcount_irq);
do_IRQ(mips_cpu_perf_irq);
}
/*
@@ -129,19 +149,18 @@ unsigned long read_persistent_clock(void)
void __init plat_perf_setup(void)
{
cp0_perfcount_irq = -1;
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
cp0_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
} else
#endif
if (cp0_perfcount_irq >= 0) {
if (cpu_has_vint)
set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
#ifdef CONFIG_SMP
set_irq_handler(cp0_perfcount_irq, handle_percpu_irq);
set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq);
#endif
}
}
......
@@ -22,6 +22,7 @@
obj-y := malta_int.o malta_platform.o malta_setup.o
obj-$(CONFIG_MTD) += malta_mtd.o
# FIXME FIXME FIXME
obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o
EXTRA_CFLAGS += -Werror
@@ -36,6 +36,9 @@
#include <linux/console.h>
#endif
extern void malta_be_init(void);
extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
struct resource standard_io_resources[] = {
{
.name = "dma1",
@@ -220,4 +223,7 @@ void __init plat_mem_setup(void)
screen_info_setup();
#endif
mips_reboot_setup();
board_be_init = malta_be_init;
board_be_handler = malta_be_handler;
}
@@ -54,6 +54,12 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
preempt_enable();
}
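/*
 * Index-type cache operations act on the local core's caches only and
 * are not seen by the coherence manager, so on a CMP system a line may
 * still be live in another core's cache; CMP therefore avoids them.
 */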
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
/*
* Must die.
*/
@@ -482,6 +488,8 @@ static inline void local_r4k_flush_cache_page(void *args)
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
r4k_blast_dcache_page(addr);
if (exec && !cpu_icache_snoops_remote_store)
r4k_blast_scache_page(addr);
}
if (exec) {
if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
@@ -584,7 +592,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
* subset property so we have to flush the primary caches
* explicitly
*/
if (size >= dcache_size) {
if (cpu_has_safe_index_cacheops && size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -607,7 +615,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
return;
}
if (size >= dcache_size) {
if (cpu_has_safe_index_cacheops && size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -969,6 +977,7 @@ static void __cpuinit probe_pcache(void)
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
if ((read_c0_config7() & (1 << 16))) {
/* effectively physically indexed dcache,
thus no virtual aliases. */
@@ -1265,6 +1274,20 @@ static void __cpuinit coherency_setup(void)
}
}
#if defined(CONFIG_DMA_NONCOHERENT)
static int __cpuinitdata coherentio;
static int __init setcoherentio(char *str)
{
coherentio = 1;
return 1;
}
__setup("coherentio", setcoherentio);
#endif
void __cpuinit r4k_cache_init(void)
{
extern void build_clear_page(void);
@@ -1324,14 +1347,22 @@ void __cpuinit r4k_cache_init(void)
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
#ifdef CONFIG_DMA_NONCOHERENT
_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
_dma_cache_wback = r4k_dma_cache_wback_inv;
_dma_cache_inv = r4k_dma_cache_inv;
#if defined(CONFIG_DMA_NONCOHERENT)
if (coherentio) {
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;
} else {
_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
_dma_cache_wback = r4k_dma_cache_wback_inv;
_dma_cache_inv = r4k_dma_cache_inv;
}
#endif
build_clear_page();
build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
local_r4k___flush_cache_all(NULL);
#endif
coherency_setup();
}
@@ -221,7 +221,7 @@ void copy_user_highpage(struct page *to, struct page *from,
copy_page(vto, vfrom);
kunmap_atomic(vfrom, KM_USER0);
}
if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
if ((!cpu_has_ic_fills_f_dc) ||
pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
flush_data_cache_page((unsigned long)vto);
kunmap_atomic(vto, KM_USER1);
......
@@ -80,6 +80,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_24K:
case CPU_25KF:
case CPU_34K:
case CPU_1004K:
case CPU_74K:
case CPU_SB1:
case CPU_SB1A:
......
@@ -32,8 +32,11 @@
#define M_COUNTER_OVERFLOW (1UL << 31)
#ifdef CONFIG_MIPS_MT_SMP
#define WHAT (M_TC_EN_VPE | M_PERFCTL_VPEID(smp_processor_id()))
#define vpe_id() smp_processor_id()
static int cpu_has_mipsmt_pertccounters;
#define WHAT (M_TC_EN_VPE | \
M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
0 : cpu_data[smp_processor_id()].vpe_id)
/*
* The number of bits to shift to convert between counters per core and
@@ -243,11 +246,11 @@ static inline int __n_counters(void)
{
if (!(read_c0_config1() & M_CONFIG1_PC))
return 0;
if (!(r_c0_perfctrl0() & M_PERFCTL_MORE))
if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
return 1;
if (!(r_c0_perfctrl1() & M_PERFCTL_MORE))
if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
return 2;
if (!(r_c0_perfctrl2() & M_PERFCTL_MORE))
if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
return 3;
return 4;
@@ -274,8 +277,9 @@ static inline int n_counters(void)
return counters;
}
static inline void reset_counters(int counters)
static void reset_counters(void *arg)
{
int counters = (int)arg;
switch (counters) {
case 4:
w_c0_perfctrl3(0);
@@ -302,9 +306,12 @@ static int __init mipsxx_init(void)
return -ENODEV;
}
reset_counters(counters);
counters = counters_total_to_per_cpu(counters);
#ifdef CONFIG_MIPS_MT_SMP
cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
if (!cpu_has_mipsmt_pertccounters)
counters = counters_total_to_per_cpu(counters);
#endif
on_each_cpu(reset_counters, (void *)counters, 0, 1);
op_model_mipsxx_ops.num_counters = counters;
switch (current_cpu_type()) {
@@ -320,6 +327,13 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/25K";
break;
case CPU_1004K:
#if 0
/* FIXME: report as 34K for now */
op_model_mipsxx_ops.cpu_type = "mips/1004K";
break;
#endif
case CPU_34K:
op_model_mipsxx_ops.cpu_type = "mips/34K";
break;
@@ -365,7 +379,7 @@ static void mipsxx_exit(void)
int counters = op_model_mipsxx_ops.num_counters;
counters = counters_per_cpu_to_total(counters);
reset_counters(counters);
on_each_cpu(reset_counters, (void *)counters, 0, 1);
perf_irq = null_perf_irq;
}
......
#ifndef _ASM_CMP_H
#define _ASM_CMP_H
/*
* Definitions for CMP multitasking on MIPS cores
*/
struct task_struct;
extern void cmp_smp_setup(void);
extern void cmp_smp_finish(void);
extern void cmp_boot_secondary(int cpu, struct task_struct *t);
extern void cmp_init_secondary(void);
extern void cmp_cpus_done(void);
extern void cmp_prepare_cpus(unsigned int max_cpus);
/* This is platform specific */
extern void cmp_send_ipi(int cpu, unsigned int action);
#endif /* _ASM_CMP_H */
@@ -89,6 +89,7 @@
#define PRID_IMP_34K 0x9500
#define PRID_IMP_24KE 0x9600
#define PRID_IMP_74K 0x9700
#define PRID_IMP_1004K 0x9900
#define PRID_IMP_LOONGSON1 0x4200
#define PRID_IMP_LOONGSON2 0x6300
@@ -194,9 +195,9 @@ enum cpu_type_enum {
/*
* MIPS32 class processors
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_74K, CPU_AU1000,
CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500, CPU_AU1550,
CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_AU1000, CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500,
CPU_AU1550, CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
/*
* MIPS64 class processors
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000, 07 MIPS Technologies, Inc.
*
* Multiprocessor Subsystem Register Definitions
*
*/
#ifndef _ASM_GCMPREGS_H
#define _ASM_GCMPREGS_H
/* Offsets to major blocks within GCMP from GCMP base */
#define GCMP_GCB_OFS 0x0000 /* Global Control Block */
#define GCMP_CLCB_OFS 0x2000 /* Core Local Control Block */
#define GCMP_COCB_OFS 0x4000 /* Core Other Control Block */
#define GCMP_GDB_OFS 0x8000 /* Global Debug Block */
/* Offsets to individual GCMP registers from GCMP base */
#define GCMPOFS(block, tag, reg) (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS)
#define GCMPGCBOFS(reg) GCMPOFS(GCB, GCB, reg)
#define GCMPCLCBOFS(reg) GCMPOFS(CLCB, CCB, reg)
#define GCMPCOCBOFS(reg) GCMPOFS(COCB, CCB, reg)
#define GCMPGDBOFS(reg) GCMPOFS(GDB, GDB, reg)
/* GCMP register access */
#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg))
#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg))
#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg))
#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg))
/* Mask generation */
#define GCMPMSK(block, reg, bits) (MSK(bits)<<GCMP_##block##_##reg##_SHF)
#define GCMPGCBMSK(reg, bits) GCMPMSK(GCB, reg, bits)
#define GCMPCCBMSK(reg, bits) GCMPMSK(CCB, reg, bits)
#define GCMPGDBMSK(reg, bits) GCMPMSK(GDB, reg, bits)
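/*
 * Worked example (assumes the usual MSK(n) == ((1 << (n)) - 1) and the
 * REGP() accessor, both defined elsewhere): GCMPGCBOFS(GC) evaluates to
 * 0x0000 + 0x0000, so GCMPGCB(GC) accesses the Global Config register
 * at _gcmp_base + 0x0000, and GCMP_GCB_GC_NUMCORES_MSK is 0xff << 0.
 */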
/* GCB registers */
#define GCMP_GCB_GC_OFS 0x0000 /* Global Config Register */
#define GCMP_GCB_GC_NUMIOCU_SHF 8
#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
#define GCMP_GCB_GC_NUMCORES_SHF 0
#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */
#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 0
#define GCMP_GCB_GCMPB_CMDEFTGT_MEM1 1
#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */
#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */
#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */
#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */
#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */
#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */
#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */
#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
#define GCMP_GCB_GICBA_BASE_SHF 17
#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
#define GCMP_GCB_GICBA_EN_SHF 0
#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
/* GCB Regions */
#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */
#define GCMP_GCB_CMxBASE_BASE_SHF 16
#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
#define GCMP_GCB_CMxMASK_OFS(n) (0x0098+16*(n)) /* Global Region[0-3] Address Mask */
#define GCMP_GCB_CMxMASK_MASK_SHF 16
#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
/* Core local/Core other control block registers */
#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
#define GCMP_CCB_RESETR_INRESET_SHF 0
#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */
#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
#define GCMP_CCB_CFG_NUMVPE_SHF 0
#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
#define GCMP_CCB_OTHER_OFS 0x0018 /* Other Address */
#define GCMP_CCB_OTHER_CORENUM_SHF 16
#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */
#define GCMP_CCB_RESETBASE_BEV_SHF 12
#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
#define GCMP_CCB_ID_OFS 0x0028 /* Identification */
#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
#endif /* _ASM_GCMPREGS_H */
/*
*
*/
#ifndef _ASSEMBLER_
struct cpulaunch {
unsigned long pc;
unsigned long gp;
unsigned long sp;
unsigned long a0;
unsigned long _pad[3]; /* pad to cache line size to avoid thrashing */
unsigned long flags;
};
#else
#define LOG2CPULAUNCH 5
#define LAUNCH_PC 0
#define LAUNCH_GP 4
#define LAUNCH_SP 8
#define LAUNCH_A0 12
#define LAUNCH_FLAGS 28
#endif
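/*
 * The assembler offsets above assume 32-bit longs: pc/gp/sp/a0 plus the
 * three pad words and flags make the structure 32 bytes (LOG2CPULAUNCH
 * == 5), giving each launch slot its own cache line.
 */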
#define LAUNCH_FREADY 1
#define LAUNCH_FGO 2
#define LAUNCH_FGONE 4
#define CPULAUNCH 0x00000f00
#define NCPULAUNCH 8
/* Polling period in count cycles for secondary CPUs */
#define LAUNCHPERIOD 10000
@@ -51,6 +51,29 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
return (unsigned long) ioremap(addr, 0x10000);
}
/*
* GCMP Specific definitions
*/
#define GCMP_BASE_ADDR 0x1fbf8000
#define GCMP_ADDRSPACE_SZ (256 * 1024)
/*
* GIC Specific definitions
*/
#define GIC_BASE_ADDR 0x1bdc0000
#define GIC_ADDRSPACE_SZ (128 * 1024)
/*
* MSC01 BIU Specific definitions
* FIXME: These should be elsewhere?
*/
#define MSC01_BIU_REG_BASE 0x1bc80000
#define MSC01_BIU_ADDRSPACE_SZ (256 * 1024)
#define MSC01_SC_CFG_OFS 0x0110
#define MSC01_SC_CFG_GICPRES_MSK 0x00000004
#define MSC01_SC_CFG_GICPRES_SHF 2
#define MSC01_SC_CFG_GICENA_SHF 3
/*
* Malta RTC-device indirect register access.
*/
......
@@ -39,7 +39,9 @@
#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0
#define MIPSCPU_INT_MB1 3
#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1
#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */
#define MIPSCPU_INT_MB2 4
#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */
#define MIPSCPU_INT_MB3 5
#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3
#define MIPSCPU_INT_MB4 6
@@ -76,6 +78,31 @@
#define MSC01E_INT_PERFCTR 10
#define MSC01E_INT_CPUCTR 11
/* GIC's Nomenclature for Core Interrupt Pins on the Malta */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
#define GIC_CPU_INT1 1 /* . */
#define GIC_CPU_INT2 2 /* . */
#define GIC_CPU_INT3 3 /* . */
#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 5 */
#define GIC_EXT_INTR(x) x
/* Dummy data */
#define X 0xdead
/* External Interrupts used for IPI */
#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16
#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
#define GIC_IPI_EXT_INTR_RESCHED_VPE1 18
#define GIC_IPI_EXT_INTR_CALLFNC_VPE1 19
#define GIC_IPI_EXT_INTR_RESCHED_VPE2 20
#define GIC_IPI_EXT_INTR_CALLFNC_VPE2 21
#define GIC_IPI_EXT_INTR_RESCHED_VPE3 22
#define GIC_IPI_EXT_INTR_CALLFNC_VPE3 23
#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
#ifndef __ASSEMBLY__
extern void maltaint_init(void);
#endif
......
/*
* There are several SMP models supported
* SMTC is mutually exclusive to other options (atm)
*/
#if defined(CONFIG_MIPS_MT_SMTC)
#define malta_smtc 1
#define malta_cmp 0
#define malta_smvp 0
#else
#define malta_smtc 0
#if defined(CONFIG_MIPS_CMP)
extern int gcmp_present;
#define malta_cmp gcmp_present
#else
#define malta_cmp 0
#endif
/* FIXME: should become CONFIG_MIPS_MT_SMVP */
#if defined(CONFIG_MIPS_MT_SMP)
#define malta_smvp 1
#else
#define malta_smvp 0
#endif
#endif
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
/* malta_smtc */
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
/* malta_cmp */
#include <asm/cmp.h>
/* malta_smvp */
#include <asm/smvp.h>
@@ -197,8 +197,8 @@ static inline void __raw_evpe(void)
" .set pop \n");
}
/* Enable multiMT if previous suggested it should be.
EMT_ENABLE to force */
/* Enable virtual processor execution if previous suggested it should be.
EVPE_ENABLE to force */
#define EVPE_ENABLE MVPCONTROL_EVP
@@ -238,8 +238,8 @@ static inline void __raw_emt(void)
" .set reorder");
}
/* enable multiVPE if previous suggested it should be.
EVPE_ENABLE to force */
/* enable multi-threaded execution if previous suggested it should be.
EMT_ENABLE to force */
#define EMT_ENABLE VPECONTROL_TE
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 by Ralf Baechle (ralf@linux-mips.org)
*/
#ifndef __ASM_R4K_TYPES_H
#define __ASM_R4K_TYPES_H
#include <linux/compiler.h>
#ifdef CONFIG_SYNC_R4K
extern void synchronise_count_master(void);
extern void synchronise_count_slave(void);
#else
static inline void synchronise_count_master(void)
{
}
static inline void synchronise_count_slave(void)
{
}
#endif
#endif /* __ASM_R4K_TYPES_H */
@@ -51,6 +51,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
#endif /* !CONFIG_SMP */
extern struct plat_smp_ops up_smp_ops;
extern struct plat_smp_ops cmp_smp_ops;
extern struct plat_smp_ops vsmp_smp_ops;
#endif /* __ASM_SMP_OPS_H */
@@ -44,6 +44,7 @@ extern int mipsmt_build_cpu_map(int startslot);
extern void mipsmt_prepare_cpus(void);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
/*
* Sharing the TLB between multiple VPEs means that the
......
#ifndef _ASM_SMVP_H
#define _ASM_SMVP_H
/*
* Definitions for SMVP multitasking on MIPS MT cores
*/
struct task_struct;
extern void smvp_smp_setup(void);
extern void smvp_smp_finish(void);
extern void smvp_boot_secondary(int cpu, struct task_struct *t);
extern void smvp_init_secondary(void);
extern void smvp_cpus_done(void);
extern void smvp_prepare_cpus(unsigned int max_cpus);
/* This is platform specific */
extern void smvp_send_ipi(int cpu, unsigned int action);
#endif /* _ASM_SMVP_H */