Commit 1308fd75 authored by Linus Torvalds

Merge tag 'arc-4.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC updates from Vineet Gupta:

 - support IDU intc for UP builds

 - support gz, lzma compressed uImage [Daniel Mentz]

 - adjust /proc/cpuinfo for non-continuous cpu ids [Noam Camus]

 - syscall for userspace cmpxchg assist for configs lacking hardware atomics

 - rework of boot log printing mainly for identifying older arc700 cores

 - retiring some old code, build toggles

* tag 'arc-4.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: module: print pretty section names
  ARC: module: elide loop to save reference to .eh_frame
  ARC: mm: retire ARC_DBG_TLB_MISS_COUNT...
  ARC: build: retire old toggles
  ARC: boot log: refactor cpu name/release printing
  ARC: boot log: remove awkward space comma from MMU line
  ARC: boot log: don't assume SWAPE instruction support
  ARC: boot log: refactor printing abt features not captured in BCRs
  ARCv2: boot log: print IOC exists as well as enabled status
  ARCv2: IOC: use @ioc_enable not @ioc_exist where intended
  ARC: syscall for userspace cmpxchg assist
  ARC: fix build warning in elf.h
  ARC: Adjust cpuinfo for non-continuous cpu ids
  ARC: [build] Support gz, lzma compressed uImage
  ARCv2: intc: untangle SMP, MCIP and IDU
parents 6fcc8cea b75dcd9c
......@@ -41,6 +41,8 @@ config ARC
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
config MIGHT_HAVE_PCI
bool
......@@ -186,14 +188,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
help
This IP block enables SMP in ARC-HS38 cores.
It provides for cross-core interrupts, multi-core debug
hardware semaphores, shared memory,....
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
......@@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
endif #SMP
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
default y if SMP
help
This IP block enables SMP in ARC-HS38 cores.
It provides for cross-core interrupts, multi-core debug
hardware semaphores, shared memory,....
menuconfig ARC_CACHE
bool "Enable Cache Support"
default y
......@@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
default n
config ARC_DBG_TLB_MISS_COUNT
bool "Profile TLB Misses"
default n
select DEBUG_FS
help
Counts number of I and D TLB Misses and exports them via Debugfs
The counters can be cleared via Debugfs as well
endif
config ARC_UBOOT_SUPPORT
......
......@@ -50,9 +50,6 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64
......
......@@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_LZMA) := lzma
targets += uImage uImage.bin uImage.gz
extra-y += vmlinux.bin vmlinux.bin.gz
targets += uImage
targets += uImage.bin
targets += uImage.gz
targets += uImage.lzma
extra-y += vmlinux.bin
extra-y += vmlinux.bin.gz
extra-y += vmlinux.bin.lzma
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
......@@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'
......@@ -349,10 +349,11 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa isa;
const char *details, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
......
......@@ -53,7 +53,7 @@ extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
extern int ioc_exists;
extern int ioc_enable;
extern unsigned long perip_base, perip_end;
#endif /* !__ASSEMBLY__ */
......
......@@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
......
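The elf.h change above is the "fix build warning" patch from the pull list: with a plain 2, the product 2 * TASK_SIZE is evaluated in signed int and, for a large TASK_SIZE, overflows before the division, which is presumably the warning being silenced; the UL suffix moves the whole expression into unsigned long arithmetic. A standalone sketch of the effect, using a hypothetical TASK_SIZE value rather than the real arch definition:

#include <stdio.h>

/* Hypothetical value, chosen only to illustrate the promotion issue;
 * the real TASK_SIZE comes from the ARC arch headers. */
#define TASK_SIZE 0x60000000

/* 2 * TASK_SIZE would be computed as 'int' and exceed INT_MAX (0x7fffffff),
 * tripping -Woverflow; 2UL forces unsigned long arithmetic throughout. */
#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)

int main(void)
{
        printf("ELF_ET_DYN_BASE = %#lx\n", ELF_ET_DYN_BASE); /* 0x40000000 */
        return 0;
}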
......@@ -55,6 +55,22 @@ struct mcip_cmd {
#define IDU_M_DISTRI_DEST 0x2
};
struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
idu:1, llm:1, num_cores:6,
iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8,
pad:1, ipi:1, sem:1, msg:1,
pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1,
pad3:8;
#endif
};
/*
* MCIP programming model
*
......
......@@ -18,6 +18,7 @@
struct mod_arch_specific {
void *unw_info;
int unw_sec_idx;
const char *secstr;
};
#endif
......
......@@ -27,11 +27,6 @@ struct id_to_str {
const char *str;
};
struct cpuinfo_data {
struct id_to_str info;
int up_range;
};
extern int root_mountflags, end_mem;
void setup_processor(void);
......@@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
#endif /* __ASMARC_SETUP_H */
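The IS_AVAIL* helpers above exist so that a single feature can contribute two strings to one printf-style call: the feature name, plus a runtime/Kconfig qualifier. A rough illustration of the idiom as the new boot-log code uses it; IS_AVAIL1 and IS_DISABLED_RUN are not part of this hunk, so the definitions below are assumptions modelled on IS_USED_RUN:

#include <stdio.h>

/* Assumed bodies (not shown in this diff), patterned after IS_USED_RUN. */
#define IS_AVAIL1(v, s)         ((v) ? s : "")
#define IS_DISABLED_RUN(v)      ((v) ? "" : "(disabled) ")

#define IS_AVAIL3(v, v2, s)     IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))

int main(void)
{
        int ioc_exists = 1, ioc_enable = 0;

        /* IS_AVAIL3 expands to two arguments, so the format string needs two %s:
         * the feature name if it exists, plus "(disabled) " if it is turned off. */
        printf("Peripherals\t: %#lx%s%s\n", 0xf0000000UL,
               IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
        /* -> Peripherals  : 0xf0000000, IO-Coherency (disabled)  */
        return 0;
}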
......@@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h>
......
......@@ -27,18 +27,19 @@
#define NR_syscalls __NR_syscalls
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
/* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
__SYSCALL(__NR_sysfs, sys_sysfs)
#undef __SYSCALL
......
......@@ -15,11 +15,12 @@
#include <asm/mcip.h>
#include <asm/setup.h>
static char smp_cpuinfo_buf[128];
static int idu_detected;
static DEFINE_RAW_SPINLOCK(mcip_lock);
#ifdef CONFIG_SMP
static char smp_cpuinfo_buf[128];
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
......@@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
static void mcip_probe_n_setup(void)
{
struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
idu:1, llm:1, num_cores:6,
iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8,
pad:1, ipi:1, sem:1, msg:1,
pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1,
pad3:8;
#endif
} mp;
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
......@@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
idu_detected = mp.idu;
if (mp.dbg) {
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
......@@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
.ipi_clear = mcip_ipi_clear,
};
#endif
/***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU)
*
......@@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
int i, irq;
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
if (!idu_detected)
if (!mp.idu)
panic("IDU not detected, but DeviceTree using it");
pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
......
......@@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int i;
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
mod->arch.unw_sec_idx = i;
break;
}
}
mod->arch.secstr = secstr;
#endif
return 0;
}
......@@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
int i, n;
int i, n, relo_type;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
Elf32_Addr relocation;
Elf32_Addr location;
Elf32_Addr sec_to_patch;
int relo_type;
sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
Elf32_Addr relocation, location, tgt_addr;
unsigned int tgtsec;
/*
* @relsec has relocations e.g. .rela.init.text
* @tgtsec is section to patch e.g. .init.text
*/
tgtsec = sechdrs[relsec].sh_info;
tgt_addr = sechdrs[tgtsec].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
pr_debug("\n========== Module Sym reloc ===========================\n");
pr_debug("Section to fixup %x\n", sec_to_patch);
pr_debug("\nSection to fixup %s @%x\n",
module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
pr_debug("=========================================================\n");
pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
const char *s;
/* This is where to make the change */
location = sec_to_patch + rel_entry[i].r_offset;
location = tgt_addr + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
......@@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
relocation = sym_entry->st_value + rel_entry[i].r_addend;
pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation,
strtab + sym_entry->st_name);
if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
} else {
s = strtab + sym_entry->st_name;
}
pr_debug(" %x\t%x\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation, s);
/* This assumes modules are built with -mlong-calls
* so any branches/jumps are absolute 32 bit jmps
......@@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
goto relo_err;
}
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
return 0;
relo_err:
......
......@@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
return task_thread_info(current)->thr_ptr;
}
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
int uval;
int ret;
/*
* This is only for old cores lacking LLOCK/SCOND, which by definition
* can't possibly be SMP. Thus doesn't need to be SMP safe.
* And this also helps reduce the overhead for serializing in
* the UP case
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
preempt_disable();
ret = __get_user(uval, uaddr);
if (ret)
goto done;
if (uval != expected)
ret = -EAGAIN;
else
ret = __put_user(new, uaddr);
done:
preempt_enable();
return ret;
}
void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */
......
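For reference, the new primitive is reached from user space through its arch-specific syscall number (__NR_arc_usr_cmpxchg, wired up as __NR_arch_specific_syscall + 4 in the unistd.h hunk above). A minimal user-space sketch of how it could back a lock on LLOCK/SCOND-less cores; the wrapper name and the spin-retry policy are illustrative, not part of the kernel ABI:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_arc_usr_cmpxchg
#define __NR_arc_usr_cmpxchg 248   /* __NR_arch_specific_syscall (244) + 4; normally from the uapi header */
#endif

/* Returns 0 if *uaddr held 'expected' and now holds 'newval';
 * -1 with errno == EAGAIN if some other value was observed (see kernel code above). */
static int arc_usr_cmpxchg(int *uaddr, int expected, int newval)
{
        return syscall(__NR_arc_usr_cmpxchg, uaddr, expected, newval);
}

int main(void)
{
        int lock = 0;

        while (arc_usr_cmpxchg(&lock, 0, 1) != 0) {
                if (errno != EAGAIN)
                        return 1;          /* e.g. EFAULT on a bad address */
                /* contended: retry (a real implementation would yield/back off) */
        }
        printf("lock acquired, value = %d\n", lock);
        return 0;
}

Note the sequence is only safe because the kernel disables preemption around the compare-and-store, and the WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP)) in the implementation documents that it is intended for non-SMP parts only.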
......@@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
static const struct id_to_str arc_cpu_rel[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ 0x34, "R4.10"},
{ 0x35, "R4.11"},
#else
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
#endif
{ 0x00, NULL }
};
static const struct id_to_str arc_cpu_nm[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ 0x20, "ARC 600" },
{ 0x30, "ARC 770" }, /* 750 identified seperately */
#else
{ 0x40, "ARC EM" },
{ 0x50, "ARC HS38" },
#endif
{ 0x00, "Unknown" }
};
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
if (is_isa_arcompact()) {
......@@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
const struct id_to_str *tbl;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
if (cpu->core.family == tbl->id) {
cpu->details = tbl->str;
break;
}
}
for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
if ((cpu->core.family & 0xF0) == tbl->id)
break;
}
cpu->name = tbl->str;
READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0;
cpu->extn.timer1 = timer.t1;
......@@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
......@@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
}
static const struct cpuinfo_data arc_cpu_tbl[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ {0x20, "ARC 600" }, 0x2F},
{ {0x30, "ARC 700" }, 0x33},
{ {0x34, "ARC 700 R4.10"}, 0x34},
{ {0x35, "ARC 700 R4.11"}, 0x35},
#else
{ {0x50, "ARC HS38 R2.0"}, 0x51},
{ {0x52, "ARC HS38 R2.1"}, 0x52},
{ {0x53, "ARC HS38 R3.0"}, 0x53},
#endif
{ {0x00, NULL } }
};
/* some hacks for lack of feature BCR info in old ARC700 cores */
if (is_isa_arcompact()) {
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
else
cpu->isa.atomic = cpu->isa.atomic1;
cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
/* there's no direct way to distinguish 750 vs. 770 */
if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
cpu->name = "ARC750";
}
}
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
const struct cpuinfo_data *tbl;
char *isa_nm;
int i, be, atomic;
int n = 0;
int i, n = 0;
FIX_PTR(cpu);
if (is_isa_arcompact()) {
isa_nm = "ARCompact";
be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
atomic = cpu->isa.atomic1;
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
} else {
isa_nm = "ARCv2";
be = cpu->isa.be;
atomic = cpu->isa.atomic;
}
n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
if ((core->family >= tbl->info.id) &&
(core->family <= tbl->up_range)) {
n += scnprintf(buf + n, len - n,
"processor [%d]\t: %s (%s ISA) %s\n",
cpu_id, tbl->info.str, isa_nm,
IS_AVAIL1(be, "[Big-Endian]"));
break;
}
}
if (tbl->info.id == 0)
n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
cpu_id, cpu->name, cpu->details,
is_isa_arcompact() ? "ARCompact" : "ARCv2",
IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
......@@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
......@@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.swap, "swap "),
IS_AVAIL1(cpu->extn.minmax, "minmax "),
IS_AVAIL1(cpu->extn.crc, "crc "),
IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
......@@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
FIX_PTR(cpu);
n += scnprintf(buf + n, len - n,
"Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
cpu->vec_base, perip_base, perip_end);
n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
......@@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
* way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFcccc, which is decoded by show routine.
*/
return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
......
......@@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
if (!user_mode(regs))
show_stacktrace(current, regs);
}
#ifdef CONFIG_DEBUG_FS
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/debugfs.h>
static struct dentry *test_dentry;
static struct dentry *test_dir;
static struct dentry *test_u32_dentry;
static u32 clr_on_read = 1;
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
u32 numitlb, numdtlb, num_pte_not_present;
static int fill_display_data(char *kbuf)
{
size_t num = 0;
num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
if (clr_on_read)
numitlb = numdtlb = num_pte_not_present = 0;
return num;
}
static int tlb_stats_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)__get_free_page(GFP_KERNEL);
return 0;
}
/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
loff_t *offset) /* offset in the file */
{
size_t num;
char *kbuf = (char *)file->private_data;
/* All of the data can be shoved in one iteration */
if (*offset != 0)
return 0;
num = fill_display_data(kbuf);
/* simple_read_from_buffer() is helper for copy to user space
It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
@3 (offset) into the user space address starting at @1 (user_buf).
@5 (len) is max size of user buffer
*/
return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
}
/* called on user write : clears the counters */
static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
size_t length, loff_t *offset)
{
numitlb = numdtlb = num_pte_not_present = 0;
return length;
}
static int tlb_stats_close(struct inode *inode, struct file *file)
{
free_page((unsigned long)(file->private_data));
return 0;
}
static const struct file_operations tlb_stats_file_ops = {
.read = tlb_stats_output,
.write = tlb_stats_clear,
.open = tlb_stats_open,
.release = tlb_stats_close
};
#endif
static int __init arc_debugfs_init(void)
{
test_dir = debugfs_create_dir("arc", NULL);
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
&tlb_stats_file_ops);
#endif
test_u32_dentry =
debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
return 0;
}
module_init(arc_debugfs_init);
static void __exit arc_debugfs_exit(void)
{
debugfs_remove(test_u32_dentry);
debugfs_remove(test_dentry);
debugfs_remove(test_dir);
}
module_exit(arc_debugfs_exit);
#endif
......@@ -22,8 +22,8 @@
#include <asm/setup.h>
static int l2_line_sz;
int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
......@@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
if (!is_isa_arcv2())
return buf;
p = &cpuinfo_arc700[c].slc;
if (p->ver)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
if (ioc_exists)
n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
IS_DISABLED_RUN(ioc_enable));
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
return buf;
}
......@@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
if (cbcr.c && ioc_enable)
if (cbcr.c)
ioc_exists = 1;
else
ioc_enable = 0;
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
......@@ -1002,7 +1001,7 @@ void arc_cache_init(void)
read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
}
if (is_isa_arcv2() && ioc_exists) {
if (is_isa_arcv2() && ioc_enable) {
/* IO coherency base - 0x8z */
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
......
......@@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
if ((is_isa_arcv2() && ioc_exists) ||
if ((is_isa_arcv2() && ioc_enable) ||
(attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
......@@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
int is_non_coh = 1;
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
(is_isa_arcv2() && ioc_exists);
(is_isa_arcv2() && ioc_enable);
if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr);
......
......@@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
char super_pg[64] = "";
if (p_mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "%dM Super Page%s, ",
scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n,
"MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
"MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
}
......
......@@ -237,15 +237,6 @@ ex_saved_reg1:
2:
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
and.f 0, r0, _PAGE_PRESENT
bz 1f
ld r3, [num_pte_not_present]
add r3, r3, 1
st r3, [num_pte_not_present]
1:
#endif
.endm
;-----------------------------------------------------------------
......@@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
ld r0, [@numitlb]
add r0, r0, 1
st r0, [@numitlb]
#endif
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE
......@@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
ld r0, [@numdtlb]
add r0, r0, 1
st r0, [@numdtlb]
#endif
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
......