Commit 44786880 authored by Linus Torvalds

Merge branch 'parisc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "Lots of small fixes and enhancements, most noteably:

   - Many TLB and cache flush optimizations (Dave)

   - Fixed HPMC/crash handler on 64-bit kernel (Dave and myself)

   - Added alternative infrastructure. The kernel now live-patches itself
     for various situations, e.g. it replaces SMP code when running on a
     single CPU and drops cache flushes when the system has no cache
     installed (see the sketch after this list).

   - vmlinuz now contains a full copy of the compressed vmlinux file.
     This simplifies debugging the currently booted kernel.

   - Unused driver removal (Christoph)

   - Reduced warnings from the Dino PCI bridge when running under QEMU

   - Removed gcc version check (Masahiro)"
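
As an illustration of the alternative infrastructure, here is a minimal sketch of the runtime side (an editor's addition, not part of the merge: apply_one() is a hypothetical helper, while struct alt_instr, ALT_COND_NO_SMP and INSN_NOP come from the diff below). Each patch site emits a self-relative record into the .altinstructions section; early boot code walks those records and overwrites the original instructions wherever the condition holds.

    /* Sketch modeled on apply_alternatives_all() in
     * arch/parisc/kernel/setup.c (see the hunk below). */
    struct alt_instr {
        s32 orig_offset;   /* self-relative offset to the patch site */
        u32 len;           /* number of original instructions */
        u32 cond;          /* ALT_COND_* bits */
        u32 replacement;   /* replacement instruction */
    };

    static void apply_one(struct alt_instr *entry)
    {
        /* resolve the self-relative offset back to a code address */
        u32 *from = (u32 *)((unsigned long)&entry->orig_offset +
                            entry->orig_offset);

        /* leave the code alone when the condition does not apply */
        if ((entry->cond & ALT_COND_NO_SMP) && num_online_cpus() != 1)
            return;

        *from = entry->replacement;  /* e.g. INSN_NOP == 0x08000240 */
    }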

* 'parisc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (23 commits)
  parisc: Retrieve and display the PDC PAT capabilities
  parisc: Optimize cache flush algorithms
  parisc: Remove pte_inserted define
  parisc: Add PDC PAT cell_info() and pd_get_pdc_revisions() functions
  parisc: Drop two instructions from pte lookup code
  parisc: Use zdep for shlw macro on PA1.1 and PA2.0
  parisc: Add alternative coding infrastructure
  parisc: Include compressed vmlinux file in vmlinuz boot kernel
  extract-vmlinux: Check for uncompressed image as fallback
  parisc: Fix address in HPMC IVA
  parisc: Fix exported address of os_hpmc handler
  parisc: Fix map_pages() to not overwrite existing pte entries
  parisc: Purge TLB entries after updating page table entry and set page accessed flag in TLB handler
  parisc: Release spinlocks using ordered store
  parisc: Ratelimit dino stuck interrupt warnings
  parisc: dino: Utilize DINO_MASK_IRQ() macro
  parisc: Clean up crash header output
  parisc: Add SYSTEM_INFO and REGISTER TOC PAT functions
  parisc: Remove PTE load and fault check from L2_ptep macro
  parisc: Reorder TLB flush timing calculation
  ...
parents 07171da2 e543b3a6
......@@ -156,12 +156,3 @@ define archhelp
@echo ' copy to $$(INSTALL_PATH)'
@echo ' zinstall - Install compressed vmlinuz kernel'
endef
# we require gcc 3.3 or above to compile the kernel
archprepare: checkbin
checkbin:
@if test "$(cc-version)" -lt "0303"; then \
echo -n "Sorry, GCC v3.3 or above is required to build " ; \
echo "the kernel." ; \
false ; \
fi
......@@ -14,7 +14,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -fno-builtin-printf
KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
ifndef CONFIG_64BIT
KBUILD_CFLAGS += -mfast-indirect-calls
......@@ -22,7 +22,6 @@ endif
OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o
# LDFLAGS_vmlinux := -X --whole-archive -e startup -T
LDFLAGS_vmlinux := -X -e startup --as-needed -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC)
$(call if_changed,ld)
......@@ -55,7 +54,6 @@ $(obj)/misc.o: $(obj)/sizes.h
CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
$(obj)/vmlinux.lds: $(obj)/sizes.h
OBJCOPYFLAGS_vmlinux.bin := -O binary -R .comment -S
$(obj)/vmlinux.bin: vmlinux
$(call if_changed,objcopy)
......
......@@ -5,6 +5,7 @@
*/
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include "sizes.h"
......@@ -227,13 +228,62 @@ static void flush_data_cache(char *start, unsigned long length)
asm ("sync");
}
static void parse_elf(void *output)
{
#ifdef CONFIG_64BIT
Elf64_Ehdr ehdr;
Elf64_Phdr *phdrs, *phdr;
#else
Elf32_Ehdr ehdr;
Elf32_Phdr *phdrs, *phdr;
#endif
void *dest;
int i;
memcpy(&ehdr, output, sizeof(ehdr));
if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
ehdr.e_ident[EI_MAG3] != ELFMAG3) {
error("Kernel is not a valid ELF file");
return;
}
#ifdef DEBUG
printf("Parsing ELF... ");
#endif
phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
if (!phdrs)
error("Failed to allocate space for phdrs");
memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
for (i = 0; i < ehdr.e_phnum; i++) {
phdr = &phdrs[i];
switch (phdr->p_type) {
case PT_LOAD:
dest = (void *)((unsigned long) phdr->p_paddr &
(__PAGE_OFFSET_DEFAULT-1));
memmove(dest, output + phdr->p_offset, phdr->p_filesz);
break;
default:
break;
}
}
free(phdrs);
}
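/* Editor's gloss (hedged): p_paddr in the embedded vmlinux carries the
 * kernel's virtual link address, so masking with __PAGE_OFFSET_DEFAULT-1
 * strips the virtual offset and each PT_LOAD segment is copied to its
 * physical destination; this works because the bootloader itself runs
 * with __PAGE_OFFSET defined as 0 (see the page.h hunk further down). */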
unsigned long decompress_kernel(unsigned int started_wide,
unsigned int command_line,
const unsigned int rd_start,
const unsigned int rd_end)
{
char *output;
unsigned long len, len_all;
unsigned long vmlinux_addr, vmlinux_len;
unsigned long kernel_addr, kernel_len;
#ifdef CONFIG_64BIT
parisc_narrow_firmware = 0;
......@@ -241,27 +291,29 @@ unsigned long decompress_kernel(unsigned int started_wide,
set_firmware_width_unlocked();
putchar('U'); /* if you get this p and no more, string storage */
putchar('D'); /* if you get this D and no more, string storage */
/* in $GLOBAL$ is wrong or %dp is wrong */
puts("ncompressing ...\n");
output = (char *) KERNEL_BINARY_TEXT_START;
len_all = __pa(SZ_end) - __pa(SZparisc_kernel_start);
puts("ecompressing Linux... ");
if ((unsigned long) &_startcode_end > (unsigned long) output)
/* where the final bits are stored */
kernel_addr = KERNEL_BINARY_TEXT_START;
kernel_len = __pa(SZ_end) - __pa(SZparisc_kernel_start);
if ((unsigned long) &_startcode_end > kernel_addr)
error("Bootcode overlaps kernel code");
len = get_unaligned_le32(&output_len);
if (len > len_all)
error("Output len too big.");
else
memset(&output[len], 0, len_all - len);
/*
* Calculate addr to where the vmlinux ELF file shall be decompressed.
* Assembly code in head.S positioned the stack directly behind bss, so
* leave 2 MB for the stack.
*/
vmlinux_addr = (unsigned long) &_ebss + 2*1024*1024;
vmlinux_len = get_unaligned_le32(&output_len);
output = (char *) vmlinux_addr;
/*
* Initialize free_mem_ptr and free_mem_end_ptr.
*/
free_mem_ptr = (unsigned long) &_ebss;
free_mem_ptr += 2*1024*1024; /* leave 2 MB for stack */
free_mem_ptr = vmlinux_addr + vmlinux_len;
/* Limit memory for bootloader to 1GB */
#define ARTIFICIAL_LIMIT (1*1024*1024*1024)
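/*
 * Editor's sketch of the resulting layout (derived from the code above):
 *
 *   _startcode_end              bootloader text/data, compressed payload
 *   _ebss                       end of bootloader bss
 *   _ebss + 2 MB                vmlinux_addr: vmlinux ELF decompressed here
 *   vmlinux_addr + vmlinux_len  free_mem_ptr: decompressor heap, capped at
 *                               1 GB (or at rd_start when a ramdisk is set)
 *
 * parse_elf() then copies the PT_LOAD segments down to kernel_addr
 * (KERNEL_BINARY_TEXT_START), where the kernel actually runs.
 */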
......@@ -275,7 +327,11 @@ unsigned long decompress_kernel(unsigned int started_wide,
free_mem_end_ptr = rd_start;
#endif
if (free_mem_ptr >= free_mem_end_ptr)
error("Kernel too big for machine.");
#ifdef DEBUG
printf("\n");
printf("startcode_end = %x\n", &_startcode_end);
printf("commandline = %x\n", command_line);
printf("rd_start = %x\n", rd_start);
......@@ -287,16 +343,19 @@ unsigned long decompress_kernel(unsigned int started_wide,
printf("input_data = %x\n", input_data);
printf("input_len = %x\n", input_len);
printf("output = %x\n", output);
printf("output_len = %x\n", len);
printf("output_max = %x\n", len_all);
printf("output_len = %x\n", vmlinux_len);
printf("kernel_addr = %x\n", kernel_addr);
printf("kernel_len = %x\n", kernel_len);
#endif
__decompress(input_data, input_len, NULL, NULL,
output, 0, NULL, error);
parse_elf(output);
flush_data_cache(output, len);
output = (char *) kernel_addr;
flush_data_cache(output, kernel_len);
printf("Booting kernel ...\n\n");
printf("done.\nBooting the kernel.\n");
return (unsigned long) output;
}
......@@ -42,6 +42,12 @@ SECTIONS
#endif
_startcode_end = .;
/* vmlinux.bin.gz is here */
. = ALIGN(8);
.rodata.compressed : {
*(.rodata.compressed)
}
/* bootloader code and data starts behind area of extracted kernel */
. = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
......@@ -68,10 +74,6 @@ SECTIONS
_erodata = . ;
}
. = ALIGN(8);
.rodata.compressed : {
*(.rodata.compressed)
}
. = ALIGN(8);
.bss : {
_bss = . ;
*(.bss)
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PARISC_ALTERNATIVE_H
#define __ASM_PARISC_ALTERNATIVE_H
#define ALT_COND_NO_SMP 0x01 /* when running UP instead of SMP */
#define ALT_COND_NO_DCACHE 0x02 /* if system has no d-cache */
#define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */
#define ALT_COND_NO_SPLIT_TLB 0x08 /* if split_tlb == 0 */
#define ALT_COND_NO_IOC_FDC 0x10 /* if I/O cache does not need flushes */
#define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */
#define INSN_NOP 0x08000240 /* nop */
#ifndef __ASSEMBLY__
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
struct alt_instr {
s32 orig_offset; /* offset to original instructions */
u32 len; /* number of original instructions */
u32 cond; /* see ALT_COND_XXX */
u32 replacement; /* replacement instruction or code */
};
void set_kernel_text_rw(int enable_read_write);
/* Alternative SMP implementation. */
#define ALTERNATIVE(cond, replacement) "!0:" \
".section .altinstructions, \"aw\" !" \
".word (0b-4-.), 1, " __stringify(cond) "," \
__stringify(replacement) " !" \
".previous"
#else
#define ALTERNATIVE(from, to, cond, replacement)\
.section .altinstructions, "aw" ! \
.word (from - .), (to - from)/4 ! \
.word cond, replacement ! \
.previous
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_ALTERNATIVE_H */
......@@ -129,15 +129,8 @@
.macro debug value
.endm
/* Shift Left - note the r and t can NOT be the same! */
.macro shl r, sa, t
dep,z \r, 31-(\sa), 32-(\sa), \t
.endm
/* The PA 2.0 shift left */
.macro shlw r, sa, t
depw,z \r, 31-(\sa), 32-(\sa), \t
zdep \r, 31-(\sa), 32-(\sa), \t
.endm
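/* Editor's note: zdep is the PA 1.1 mnemonic for "deposit, zeroing";
 * PA 2.0 assemblers accept it as an alias of depw,z, so this single
 * encoding now assembles on both architecture levels (shortlog entry
 * "Use zdep for shlw macro on PA1.1 and PA2.0"). */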
/* And the PA 2.0W shift left */
......
......@@ -6,6 +6,7 @@
#ifndef __ARCH_PARISC_CACHE_H
#define __ARCH_PARISC_CACHE_H
#include <asm/alternative.h>
/*
* PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors
......@@ -41,9 +42,24 @@ extern int icache_stride;
extern struct pdc_cache_info cache_info;
void parisc_setup_cache_timing(void);
#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr));
#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
: : "r" (addr))
#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
: : "r" (addr))
#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
: : "r" (addr))
#define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \
ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
: : "r" (addr))
#define asm_io_sync() asm volatile("sync" \
ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
#endif /* ! __ASSEMBLY__ */
......
......@@ -117,14 +117,16 @@ extern int npmem_ranges;
/* This governs the relationship between virtual and physical addresses.
* If you alter it, make sure to take care of our various fixed mapping
* segments in fixmap.h */
#if defined(BOOTLOADER)
#define __PAGE_OFFSET (0) /* bootloader uses physical addresses */
#else
#ifdef CONFIG_64BIT
#define __PAGE_OFFSET (0x40000000) /* 1GB */
#define __PAGE_OFFSET_DEFAULT (0x40000000) /* 1GB */
#else
#define __PAGE_OFFSET (0x10000000) /* 256MB */
#define __PAGE_OFFSET_DEFAULT (0x10000000) /* 256MB */
#endif
#if defined(BOOTLOADER)
#define __PAGE_OFFSET (0) /* bootloader uses physical addresses */
#else
#define __PAGE_OFFSET __PAGE_OFFSET_DEFAULT
#endif /* BOOTLOADER */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
......
......@@ -11,6 +11,7 @@ extern int parisc_narrow_firmware;
extern int pdc_type;
extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */
extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT) */
extern unsigned long parisc_pat_pdc_cap; /* PDC capabilities (PAT) */
/* Values for pdc_type */
#define PDC_TYPE_ILLEGAL -1
......
......@@ -173,6 +173,16 @@
/* PDC PAT PD */
#define PDC_PAT_PD 74L /* Protection Domain Info */
#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
#define PDC_PAT_PD_GET_PDC_INTERF_REV 1L /* Get PDC Interface Revisions */
#define PDC_PAT_CAPABILITY_BIT_PDC_SERIALIZE (1UL << 0)
#define PDC_PAT_CAPABILITY_BIT_PDC_POLLING (1UL << 1)
#define PDC_PAT_CAPABILITY_BIT_PDC_NBC (1UL << 2) /* non-blocking calls */
#define PDC_PAT_CAPABILITY_BIT_PDC_UFO (1UL << 3)
#define PDC_PAT_CAPABILITY_BIT_PDC_IODC_32 (1UL << 4)
#define PDC_PAT_CAPABILITY_BIT_PDC_IODC_64 (1UL << 5)
#define PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ (1UL << 6)
#define PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB (1UL << 7)
/* PDC_PAT_PD_GET_ADDR_MAP entry types */
#define PAT_MEMORY_DESCRIPTOR 1
......@@ -186,6 +196,14 @@
#define PAT_MEMUSE_GI 128
#define PAT_MEMUSE_GNI 129
/* PDC PAT REGISTER TOC */
#define PDC_PAT_REGISTER_TOC 75L
#define PDC_PAT_TOC_REGISTER_VECTOR 0L /* Register TOC Vector */
#define PDC_PAT_TOC_READ_VECTOR 1L /* Read TOC Vector */
/* PDC PAT SYSTEM_INFO */
#define PDC_PAT_SYSTEM_INFO 76L
/* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. */
#ifndef __ASSEMBLY__
#include <linux/types.h>
......@@ -297,18 +315,29 @@ struct pdc_pat_pd_addr_map_entry {
** PDC_PAT_CELL_GET_INFO return block
*/
typedef struct pdc_pat_cell_info_rtn_block {
unsigned long cpu_info;
unsigned long cell_info;
unsigned long cell_location;
unsigned long reo_location;
unsigned long mem_size;
unsigned long dimm_status;
unsigned long pdc_rev;
unsigned long fabric_info0;
unsigned long fabric_info1;
unsigned long fabric_info2;
unsigned long fabric_info3;
unsigned long reserved[21];
unsigned long capabilities; /* see PDC_PAT_CAPABILITY_BIT_* */
unsigned long reserved0[2];
unsigned long cell_info; /* 0x20 */
unsigned long cell_phys_location;
unsigned long cpu_info;
unsigned long cpu_speed;
unsigned long io_chassis_phys_location;
unsigned long cell_io_information;
unsigned long reserved1[2];
unsigned long io_slot_info_size; /* 0x60 */
struct {
unsigned long header, info0, info1;
unsigned long phys_loc, hw_path;
} io_slot[16];
unsigned long cell_mem_size; /* 0x2e8 */
unsigned long cell_dimm_info_size;
unsigned long dimm_info[16];
unsigned long fabric_info_size; /* 0x3f8 */
struct { /* 0x380 */
unsigned long fabric_info_xbc_port;
unsigned long rc_attached_to_xbc;
} xbc[8*4];
} pdc_pat_cell_info_rtn_block_t;
......@@ -326,12 +355,19 @@ typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
unsigned long *actcnt, unsigned long offset,
unsigned long cell_number);
extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc,
unsigned long mod, unsigned long view_type, void *mem_addr);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa);
extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
unsigned long count, unsigned long offset);
extern int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev,
unsigned long *pat_rev, unsigned long *pdc_cap);
extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val);
extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
......
......@@ -43,8 +43,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
mtsp(mm->context, 1);
pdtlb(addr);
if (unlikely(split_tlb))
pitlb(addr);
pitlb(addr);
}
/* Certain architectures need to do special things when PTEs
......@@ -56,19 +55,14 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
*(pteptr) = (pteval); \
} while(0)
#define pte_inserted(x) \
((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
== (_PAGE_PRESENT|_PAGE_ACCESSED))
#define set_pte_at(mm, addr, ptep, pteval) \
do { \
pte_t old_pte; \
unsigned long flags; \
spin_lock_irqsave(&pa_tlb_lock, flags); \
old_pte = *ptep; \
if (pte_inserted(old_pte)) \
purge_tlb_entries(mm, addr); \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
spin_unlock_irqrestore(&pa_tlb_lock, flags); \
} while (0)
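/* Editor's note on the reordering (my reading, hedged): the TLB handlers
 * insert translations derived from the PTE, so purging only after the new
 * value has been stored ensures that an entry created from the old PTE
 * value cannot survive the update. */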
......@@ -202,7 +196,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC)
......@@ -227,22 +221,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#ifndef __ASSEMBLY__
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
or not. The stack is mapped this way though so this is necessary
in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ)
/*
......@@ -479,8 +473,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 0;
}
purge_tlb_entries(vma->vm_mm, addr);
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 1;
}
......@@ -493,9 +487,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
spin_lock_irqsave(&pa_tlb_lock, flags);
old_pte = *ptep;
if (pte_inserted(old_pte))
purge_tlb_entries(mm, addr);
set_pte(ptep, __pte(0));
purge_tlb_entries(mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return old_pte;
......@@ -505,8 +498,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
{
unsigned long flags;
spin_lock_irqsave(&pa_tlb_lock, flags);
purge_tlb_entries(mm, addr);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
}
......
......@@ -5,6 +5,8 @@
/* nothing to see, move along */
#include <asm-generic/sections.h>
extern char __alt_instructions[], __alt_instructions_end[];
#ifdef CONFIG_64BIT
#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
......
......@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
mb();
*a = 1;
/* Release with ordered store. */
__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
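/* Editor's note (hedged): on PA-RISC a store with the ,ma completer and a
 * zero displacement acts as an ordered store, becoming visible only after
 * all prior loads and stores; it can therefore replace the heavier
 * "sync; stw" release sequence. The LWS/cas handlers further down switch
 * to the same idiom. */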
static inline int arch_spin_trylock(arch_spinlock_t *x)
......
......@@ -85,8 +85,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
purge_tlb_start(flags);
mtsp(sid, 1);
pdtlb(addr);
if (unlikely(split_tlb))
pitlb(addr);
pitlb(addr);
purge_tlb_end(flags);
}
#endif
......@@ -36,6 +36,7 @@ EXPORT_SYMBOL(dcache_stride);
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
......@@ -303,6 +304,17 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long physaddr)
{
preempt_disable();
purge_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_icache_page_asm(physaddr, vmaddr);
preempt_enable();
}
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping_file(page);
......@@ -364,7 +376,7 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
void __init parisc_setup_cache_timing(void)
......@@ -404,10 +416,6 @@ void __init parisc_setup_cache_timing(void)
goto set_tlb_threshold;
}
alltime = mfctl(16);
flush_tlb_all();
alltime = mfctl(16) - alltime;
size = 0;
start = (unsigned long) _text;
rangetime = mfctl(16);
......@@ -418,13 +426,19 @@ void __init parisc_setup_cache_timing(void)
}
rangetime = mfctl(16) - rangetime;
printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime = mfctl(16);
flush_tlb_all();
alltime = mfctl(16) - alltime;
printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
alltime, size, rangetime);
threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
threshold/1024);
set_tlb_threshold:
if (threshold)
if (threshold > parisc_tlb_flush_threshold)
parisc_tlb_flush_threshold = threshold;
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
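/* Editor's worked example (illustrative numbers, not measured): with two
 * CPUs online, size = 8 MiB flushed page by page in rangetime = 4,000,000
 * cycles, and a whole-TLB flush costing alltime = 500,000 cycles, the
 * threshold is 2 * 8 MiB * 500000 / 4000000 = 2 MiB; ranges larger than
 * that are cheaper to handle with flush_tlb_all(). */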
......@@ -477,18 +491,6 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
/* Purge TLB entries for small ranges using the pdtlb and
pitlb instructions. These instructions execute locally
but cause a purge request to be broadcast to other TLBs. */
if (likely(!split_tlb)) {
while (start < end) {
purge_tlb_start(flags);
mtsp(sid, 1);
pdtlb(start);
purge_tlb_end(flags);
start += PAGE_SIZE;
}
return 0;
}
/* split TLB case */
while (start < end) {
purge_tlb_start(flags);
mtsp(sid, 1);
......@@ -573,9 +575,12 @@ void flush_cache_mm(struct mm_struct *mm)
pfn = pte_pfn(*ptep);
if (!pfn_valid(pfn))
continue;
if (unlikely(mm->context))
if (unlikely(mm->context)) {
flush_tlb_page(vma, addr);
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
} else {
__purge_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
}
......@@ -610,9 +615,12 @@ void flush_cache_range(struct vm_area_struct *vma,
continue;
pfn = pte_pfn(*ptep);
if (pfn_valid(pfn)) {
if (unlikely(vma->vm_mm->context))
if (unlikely(vma->vm_mm->context)) {
flush_tlb_page(vma, addr);
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
} else {
__purge_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
}
......@@ -621,9 +629,12 @@ void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (pfn_valid(pfn)) {
if (likely(vma->vm_mm->context))
if (likely(vma->vm_mm->context)) {
flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
} else {
__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
}
......
......@@ -38,6 +38,7 @@
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>
#include <linux/linkage.h>
......@@ -186,7 +187,7 @@
bv,n 0(%r3)
nop
.word 0 /* checksum (will be patched) */
.word PA(os_hpmc) /* address of handler */
.word 0 /* address of handler */
.word 0 /* length of handler */
.endm
......@@ -426,13 +427,10 @@
ldw,s \index(\pmd),\pmd
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
copy \pmd,%r9
SHLREG %r9,PxD_VALUE_SHIFT,\pmd
SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
LDREG %r0(\pmd),\pte
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
.endm
/* Look up PTE in a 3-Level scheme.
......@@ -448,7 +446,6 @@
.macro L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
copy %r0,\pte
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldw,s \index(\pgd),\pgd
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
......@@ -463,36 +460,39 @@
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
/* Acquire pa_tlb_lock lock and recheck page is still present. */
/* Acquire pa_tlb_lock lock and check page is present. */
.macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
cmpib,COND(=),n 0,\spc,2f
98: cmpib,COND(=),n 0,\spc,2f
load_pa_tlb_lock \tmp
1: LDCW 0(\tmp),\tmp1
cmpib,COND(=) 0,\tmp1,1b
nop
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,2f
bb,<,n \pte,_PAGE_PRESENT_BIT,3f
b \fault
stw \spc,0(\tmp)
2:
stw,ma \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2: LDREG 0(\ptp),\pte
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
3:
.endm
/* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
sync
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
98: or,COND(=) %r0,\spc,%r0
stw,ma \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release pa_tlb_lock lock. */
.macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
load_pa_tlb_lock \tmp
98: load_pa_tlb_lock \tmp
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
tlb_unlock0 \spc,\tmp
#endif
.endm
......@@ -1658,7 +1658,7 @@ dbit_fault:
itlb_fault:
b intr_save
ldi 6,%r8
ldi PARISC_ITLB_TRAP,%r8
nadtlb_fault:
b intr_save
......
......@@ -1325,6 +1325,36 @@ int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long
return retval;
}
/**
* pdc_pat_cell_info - Retrieve the cell's information.
* @info: The pointer to a struct pdc_pat_cell_info_rtn_block.
* @actcnt: The number of bytes which should be written to info.
* @offset: offset of the structure.
* @cell_number: The cell number to query, or -1 for the current cell.
*
* This PDC call returns information about the given cell (or all cells).
*/
int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
unsigned long *actcnt, unsigned long offset,
unsigned long cell_number)
{
int retval;
unsigned long flags;
struct pdc_pat_cell_info_rtn_block result;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_INFO,
__pa(pdc_result), __pa(&result), *actcnt,
offset, cell_number);
if (!retval) {
*actcnt = pdc_result[0];
memcpy(info, &result, *actcnt);
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_cpu_get_number - Retrieve the cpu number.
* @cpu_info: The return buffer.
......@@ -1412,6 +1442,33 @@ int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
return retval;
}
/**
* pdc_pat_pd_get_pdc_revisions - Retrieve PDC interface revisions.
* @legacy_rev: The legacy revision.
* @pat_rev: The PAT revision.
* @pdc_cap: The PDC capabilities.
*
*/
int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev,
unsigned long *pat_rev, unsigned long *pdc_cap)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_PDC_INTERF_REV,
__pa(pdc_result));
if (retval == PDC_OK) {
*legacy_rev = pdc_result[0];
*pat_rev = pdc_result[1];
*pdc_cap = pdc_result[2];
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_io_pci_cfg_read - Read PCI configuration space.
* @pci_addr: PCI configuration space address for which the read request is being made.
......
......@@ -85,7 +85,7 @@ END(hpmc_pim_data)
.import intr_save, code
.align 16
ENTRY_CFI(os_hpmc)
ENTRY(os_hpmc)
.os_hpmc:
/*
......@@ -302,7 +302,6 @@ os_hpmc_6:
b .
nop
.align 16 /* make function length multiple of 16 bytes */
ENDPROC_CFI(os_hpmc)
.os_hpmc_end:
......
......@@ -43,6 +43,7 @@ int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __read_mostly;
unsigned long parisc_cell_loc __read_mostly;
unsigned long parisc_pat_pdc_cap __read_mostly;
void __init setup_pdc(void)
......@@ -81,12 +82,21 @@ void __init setup_pdc(void)
#ifdef CONFIG_64BIT
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
unsigned long legacy_rev, pat_rev;
pdc_type = PDC_TYPE_PAT;
pr_cont("64 bit PAT.\n");
parisc_cell_num = cell_info.cell_num;
parisc_cell_loc = cell_info.cell_loc;
pr_info("PAT: Running on cell %lu and location %lu.\n",
parisc_cell_num, parisc_cell_loc);
status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
&pat_rev, &parisc_pat_pdc_cap);
pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
legacy_rev, pat_rev, parisc_pat_pdc_cap,
parisc_pat_pdc_cap
& PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
parisc_pat_pdc_cap
& PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0);
return;
}
#endif
......
......@@ -37,6 +37,7 @@
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <asm/alternative.h>
#include <linux/linkage.h>
#include <linux/init.h>
......@@ -190,7 +191,7 @@ ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
load32 cache_info, %r1
88: load32 cache_info, %r1
/* Flush Instruction Cache */
......@@ -243,6 +244,7 @@ fioneloop2:
fisync:
sync
mtsm %r22 /* restore I-bit */
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
bv %r0(%r2)
nop
ENDPROC_CFI(flush_instruction_cache_local)
......@@ -250,7 +252,7 @@ ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
load32 cache_info, %r1
88: load32 cache_info, %r1
/* Flush Data Cache */
......@@ -304,6 +306,7 @@ fdsync:
syncdma
sync
mtsm %r22 /* restore I-bit */
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
bv %r0(%r2)
nop
ENDPROC_CFI(flush_data_cache_local)
......@@ -312,6 +315,7 @@ ENDPROC_CFI(flush_data_cache_local)
.macro tlb_lock la,flags,tmp
#ifdef CONFIG_SMP
98:
#if __PA_LDCW_ALIGNMENT > 4
load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
......@@ -326,15 +330,17 @@ ENDPROC_CFI(flush_data_cache_local)
nop
b,n 2b
3:
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
ldi 1,\tmp
98: ldi 1,\tmp
sync
stw \tmp,0(\la)
mtsm \flags
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
......@@ -596,9 +602,11 @@ ENTRY_CFI(copy_user_page_asm)
pdtlb,l %r0(%r29)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
pdtlb %r0(%r29)
0: pdtlb %r0(%r28)
1: pdtlb %r0(%r29)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
#ifdef CONFIG_64BIT
......@@ -736,8 +744,9 @@ ENTRY_CFI(clear_user_page_asm)
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
#ifdef CONFIG_64BIT
......@@ -813,11 +822,12 @@ ENTRY_CFI(flush_dcache_page_asm)
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
ldil L%dcache_stride, %r1
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), r31
#ifdef CONFIG_64BIT
......@@ -828,8 +838,7 @@ ENTRY_CFI(flush_dcache_page_asm)
add %r28, %r25, %r25
sub %r25, r31, %r25
1: fdc,m r31(%r28)
1: fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
......@@ -844,14 +853,76 @@ ENTRY_CFI(flush_dcache_page_asm)
fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
cmpb,COND(<<) %r28, %r25,1b
cmpb,COND(>>) %r25, %r28, 1b /* predict taken */
fdc,m r31(%r28)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(purge_dcache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
#endif
convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), r31
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r28, %r25, %r25
sub %r25, r31, %r25
1: pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
pdc,m r31(%r28)
cmpb,COND(>>) %r25, %r28, 1b /* predict taken */
pdc,m r31(%r28)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(purge_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
......@@ -874,15 +945,19 @@ ENTRY_CFI(flush_icache_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r28)
1: pitlb,l %r0(%sr4,%r28)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
pitlb %r0(%sr4,%r28)
0: pdtlb %r0(%r28)
1: pitlb %r0(%sr4,%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#endif
ldil L%icache_stride, %r1
88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r31
#ifdef CONFIG_64BIT
......@@ -893,7 +968,6 @@ ENTRY_CFI(flush_icache_page_asm)
add %r28, %r25, %r25
sub %r25, %r31, %r25
/* fic only has the type 26 form on PA1.1, requiring an
* explicit space specification, so use %sr4 */
1: fic,m %r31(%sr4,%r28)
......@@ -911,16 +985,17 @@ ENTRY_CFI(flush_icache_page_asm)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
cmpb,COND(<<) %r28, %r25,1b
cmpb,COND(>>) %r25, %r28, 1b /* predict taken */
fic,m %r31(%sr4,%r28)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
ldil L%dcache_stride, %r1
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
......@@ -931,7 +1006,6 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
add %r26, %r25, %r25
sub %r25, %r23, %r25
1: fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
......@@ -947,16 +1021,17 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
cmpb,COND(<<) %r26, %r25,1b
cmpb,COND(>>) %r25, %r26, 1b /* predict taken */
fdc,m %r23(%r26)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
ldil L%dcache_stride, %r1
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
......@@ -982,74 +1057,183 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
cmpb,COND(<<) %r26, %r25, 1b
cmpb,COND(>>) %r25, %r26, 1b /* predict taken */
pdc,m %r23(%r26)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
ldil L%dcache_stride, %r1
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25, 1b
#ifdef CONFIG_64BIT
depd,z %r23, 59, 60, %r21
#else
depw,z %r23, 27, 28, %r21
#endif
add %r26, %r21, %r22
cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
1: add %r22, %r21, %r22
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
fdc,m %r23(%sr3, %r26)
2: cmpb,COND(>>),n %r25, %r26, 2b
fdc,m %r23(%sr3, %r26)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_user_dcache_range_asm)
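/* Editor's gloss on the rewritten range loops (this one and the variants
 * below): %r21 is preloaded with 16 * stride and %r22 tracks the end of
 * the next 16-line block, so the main loop at 1: flushes 16 cache lines
 * per iteration while a full block still fits, and the tail loop at 2:
 * finishes the remainder one line at a time. */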
ENTRY_CFI(flush_kernel_dcache_range_asm)
ldil L%dcache_stride, %r1
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25,1b
#ifdef CONFIG_64BIT
depd,z %r23, 59, 60, %r21
#else
depw,z %r23, 27, 28, %r21
#endif
add %r26, %r21, %r22
cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
1: add %r22, %r21, %r22
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
fdc,m %r23(%r26)
2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */
fdc,m %r23(%r26)
sync
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
ldil L%dcache_stride, %r1
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25,1b
#ifdef CONFIG_64BIT
depd,z %r23, 59, 60, %r21
#else
depw,z %r23, 27, 28, %r21
#endif
add %r26, %r21, %r22
cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
1: add %r22, %r21, %r22
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
pdc,m %r23(%r26)
2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */
pdc,m %r23(%r26)
sync
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
ldil L%icache_stride, %r1
88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25,1b
#ifdef CONFIG_64BIT
depd,z %r23, 59, 60, %r21
#else
depw,z %r23, 27, 28, %r21
#endif
add %r26, %r21, %r22
cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
1: add %r22, %r21, %r22
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
fic,m %r23(%sr3, %r26)
2: cmpb,COND(>>),n %r25, %r26, 2b
fic,m %r23(%sr3, %r26)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
ldil L%icache_stride, %r1
88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
#ifdef CONFIG_64BIT
......@@ -1076,23 +1260,51 @@ ENTRY_CFI(flush_kernel_icache_page)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
cmpb,COND(<<) %r26, %r25, 1b
cmpb,COND(>>) %r25, %r26, 1b /* predict taken */
fic,m %r23(%sr4, %r26)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
ldil L%icache_stride, %r1
88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25, 1b
#ifdef CONFIG_64BIT
depd,z %r23, 59, 60, %r21
#else
depw,z %r23, 27, 28, %r21
#endif
add %r26, %r21, %r22
cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
1: add %r22, %r21, %r22
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
fic,m %r23(%sr4, %r26)
2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */
fic,m %r23(%sr4, %r26)
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
......
......@@ -305,6 +305,86 @@ static int __init parisc_init_resources(void)
return 0;
}
static int no_alternatives __initdata;
static int __init setup_no_alternatives(char *str)
{
no_alternatives = 1;
return 1;
}
__setup("no-alternatives", setup_no_alternatives);
static void __init apply_alternatives_all(void)
{
struct alt_instr *entry;
int index = 0, applied = 0;
pr_info("alternatives: %spatching kernel code\n",
no_alternatives ? "NOT " : "");
if (no_alternatives)
return;
set_kernel_text_rw(1);
for (entry = (struct alt_instr *) &__alt_instructions;
entry < (struct alt_instr *) &__alt_instructions_end;
entry++, index++) {
u32 *from, len, cond, replacement;
from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
len = entry->len;
cond = entry->cond;
replacement = entry->replacement;
WARN_ON(!cond);
pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
index, cond, len, from, replacement);
if ((cond & ALT_COND_NO_SMP) && (num_online_cpus() != 1))
continue;
if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
continue;
if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
continue;
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
* set (bit #61, big endian), we have to flush and sync every
* time IO-PDIR is changed in Ike/Astro.
*/
if ((cond & ALT_COND_NO_IOC_FDC) &&
(boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC))
continue;
/* Want to replace pdtlb by a pdtlb,l instruction? */
if (replacement == INSN_PxTLB) {
replacement = *from;
if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */
replacement |= (1 << 10); /* set el bit */
}
/*
* Replace instruction with NOPs?
* For long distance insert a branch instruction instead.
*/
if (replacement == INSN_NOP && len > 1)
replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */
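/* Editor's worked example: for len == 2 the replacement is 0xe8000002,
 * i.e. "b,n .+8", which skips the one remaining original instruction;
 * each further instruction adds 8 to the encoded word, moving the branch
 * target one 4-byte instruction further (len == 3 gives "b,n .+12"). */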
pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
index, cond, len, from, replacement);
/* Replace instruction */
*from = replacement;
applied++;
}
pr_info("alternatives: applied %d out of %d patches\n", applied, index);
set_kernel_text_rw(0);
}
extern void gsc_init(void);
extern void processor_init(void);
extern void ccio_init(void);
......@@ -346,6 +426,7 @@ static int __init parisc_init(void)
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
apply_alternatives_all();
parisc_setup_cache_timing();
/* These are in a non-obvious order, will fix when we have an iotree */
......
......@@ -65,7 +65,6 @@
#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */
#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */
#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
#define INSN_NOP 0x08000240 /* nop */
/* For debugging */
#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
......
......@@ -640,8 +640,7 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
......@@ -655,8 +654,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
......@@ -857,8 +855,7 @@ cas2_action:
cas2_end:
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
......@@ -868,8 +865,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit
......
......@@ -430,8 +430,8 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
}
printk("\n");
pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
msg, code, trap_name(code), regs, offset);
pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
msg, code, trap_name(code), offset);
show_regs(regs);
spin_unlock(&terminate_lock);
......@@ -802,7 +802,8 @@ void __init initialize_ivt(const void *iva)
* the Length/4 words starting at Address is zero.
*/
/* Compute Checksum for HPMC handler */
/* Setup IVA and compute checksum for HPMC handler */
ivap[6] = (u32)__pa(os_hpmc);
length = os_hpmc_size;
ivap[7] = length;
......
......@@ -61,6 +61,12 @@ SECTIONS
EXIT_DATA
}
PERCPU_SECTION(8)
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
. = ALIGN(HUGEPAGE_SIZE);
__init_end = .;
/* freed after init ends here */
......
......@@ -494,12 +494,8 @@ static void __init map_pages(unsigned long start_vaddr,
pte = pte_mkhuge(pte);
}
if (address >= end_paddr) {
if (force)
break;
else
pte_val(pte) = 0;
}
if (address >= end_paddr)
break;
set_pte(pg_table, pte);
......@@ -515,6 +511,21 @@ static void __init map_pages(unsigned long start_vaddr,
}
}
void __init set_kernel_text_rw(int enable_read_write)
{
unsigned long start = (unsigned long)_stext;
unsigned long end = (unsigned long)_etext;
map_pages(start, __pa(start), end-start,
PAGE_KERNEL_RWX, enable_read_write ? 1:0);
/* force the kernel to see the new TLB entries */
__flush_tlb_range(0, start, end);
/* dump old cached instructions */
flush_icache_range(start, end);
}
void __ref free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
......
......@@ -8,9 +8,6 @@
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_IOMMU_SBA) += sba_iommu.o
obj-$(CONFIG_PCI_LBA) += lba_pci.o
# Only use one of them: ccio-rm-dma is for PCX-W systems *only*
# obj-$(CONFIG_IOMMU_CCIO) += ccio-rm-dma.o
obj-$(CONFIG_IOMMU_CCIO) += ccio-dma.o
obj-$(CONFIG_GSC) += gsc.o
......
......@@ -609,14 +609,13 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
** PCX-T'? Don't know. (eg C110 or similar K-class)
**
** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
** Hopefully we can patch (NOP) these out at boot time somehow.
**
** "Since PCX-U employs an offset hash that is incompatible with
** the real mode coherence index generation of U2, the PDIR entry
** must be flushed to memory to retain coherence."
*/
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
asm volatile("sync");
asm_io_fdc(pdir_ptr);
asm_io_sync();
}
/**
......@@ -682,17 +681,14 @@ ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
** PCX-U/U+ do. (eg C200/C240)
** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
**
** Hopefully someone figures out how to patch (NOP) the
** FDC/SYNC out at boot time.
*/
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7]));
asm_io_fdc(pdir_ptr);
iovp += IOVP_SIZE;
byte_cnt -= IOVP_SIZE;
}
asm volatile("sync");
asm_io_sync();
ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}
......
/*
* ccio-rm-dma.c:
* DMA management routines for first generation cache-coherent machines.
* "Real Mode" operation refers to U2/Uturn chip operation. The chip
* can perform coherency checks w/o using the I/O MMU. That's all we
* need until support for more than 4GB phys mem is needed.
*
* This is the trivial case - basically what x86 does.
*
* Drawbacks of using Real Mode are:
* o outbound DMA is slower since one isn't using the prefetching
* U2 can do for outbound DMA.
* o Ability to do scatter/gather in HW is also lost.
* o only known to work with PCX-W processor. (eg C360)
* (PCX-U/U+ are not coherent with U2 in real mode.)
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* Original version/author:
* CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
* cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
*
* (C) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
*
*
* Adopted for The Puffin Group's parisc-linux port by Grant Grundler.
* (C) Copyright 2000 Grant Grundler <grundler@puffin.external.hp.com>
*
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/page.h>
/* Only chose "ccio" since that's what HP-UX calls it....
** Make it easier for folks to migrate from one to the other :^)
*/
#define MODULE_NAME "ccio"
#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC 0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC 0x502
#define IS_U2(id) ( \
(((id)->hw_type == HPHW_IOA) && ((id)->hversion == U2_IOA_RUNWAY)) || \
(((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == U2_BC_GSC)) \
)
#define IS_UTURN(id) ( \
(((id)->hw_type == HPHW_IOA) && ((id)->hversion == UTURN_IOA_RUNWAY)) || \
(((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == UTURN_BC_GSC)) \
)
static int ccio_dma_supported( struct pci_dev *dev, u64 mask)
{
if (dev == NULL) {
printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
BUG();
return(0);
}
/* only support 32-bit devices (ie PCI/GSC) */
return((int) (mask >= 0xffffffffUL));
}
static void *ccio_alloc_consistent(struct pci_dev *dev, size_t size,
dma_addr_t *handle)
{
void *ret;
ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*handle = virt_to_phys(ret);
}
return ret;
}
static void ccio_free_consistent(struct pci_dev *dev, size_t size,
void *vaddr, dma_addr_t handle)
{
free_pages((unsigned long)vaddr, get_order(size));
}
static dma_addr_t ccio_map_single(struct pci_dev *dev, void *ptr, size_t size,
int direction)
{
return virt_to_phys(ptr);
}
static void ccio_unmap_single(struct pci_dev *dev, dma_addr_t dma_addr,
size_t size, int direction)
{
/* Nothing to do */
}
static int ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
int tmp = nents;
/* KISS: map each buffer separately. */
while (nents) {
sg_dma_address(sglist) = ccio_map_single(dev, sglist->address, sglist->length, direction);
sg_dma_len(sglist) = sglist->length;
nents--;
sglist++;
}
return tmp;
}
static void ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
#if 0
while (nents) {
ccio_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
nents--;
sglist++;
}
return;
#else
/* Do nothing (copied from current ccio_unmap_single() :^) */
#endif
}
static struct pci_dma_ops ccio_ops = {
ccio_dma_supported,
ccio_alloc_consistent,
ccio_free_consistent,
ccio_map_single,
ccio_unmap_single,
ccio_map_sg,
ccio_unmap_sg,
NULL, /* dma_sync_single_for_cpu : NOP for U2 */
NULL, /* dma_sync_single_for_device : NOP for U2 */
NULL, /* dma_sync_sg_for_cpu : ditto */
NULL, /* dma_sync_sg_for_device : ditto */
};
/*
** Determine if u2 should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static int __init
ccio_probe(struct parisc_device *dev)
{
printk(KERN_INFO "%s found %s at 0x%lx\n", MODULE_NAME,
dev->id.hversion == U2_BC_GSC ? "U2" : "UTurn",
dev->hpa.start);
/*
** FIXME - should check U2 registers to verify it's really running
** in "Real Mode".
*/
#if 0
/* will need this for "Virtual Mode" operation */
ccio_hw_init(ccio_dev);
ccio_common_init(ccio_dev);
#endif
hppa_dma_ops = &ccio_ops;
return 0;
}
static const struct parisc_device_id ccio_tbl[] __initconst = {
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, U2_BC_GSC, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, UTURN_BC_GSC, 0xc },
{ 0, }
};
static struct parisc_driver ccio_driver __refdata = {
.name = "U2/Uturn",
.id_table = ccio_tbl,
.probe = ccio_probe,
};
void __init ccio_init(void)
{
register_parisc_driver(&ccio_driver);
}
......@@ -382,7 +382,7 @@ static irqreturn_t dino_isr(int irq, void *intr_dev)
DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
__func__, irq, intr_dev, mask);
generic_handle_irq(irq);
mask &= ~(1 << local_irq);
mask &= ~DINO_MASK_IRQ(local_irq);
} while (mask);
/* Support for level triggered IRQ lines.
......@@ -396,9 +396,8 @@ static irqreturn_t dino_isr(int irq, void *intr_dev)
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
pr_warn_ratelimited("Dino 0x%px: stuck interrupt %d\n",
dino_dev->hba.base_addr, mask);
return IRQ_NONE;
}
return IRQ_HANDLED;
}
......
......@@ -587,8 +587,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
if (ioc_needs_fdc)
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
asm_io_fdc(pdir_ptr);
}
......@@ -641,8 +640,8 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
do {
/* clear I/O Pdir entry "valid" bit first */
((u8 *) pdir_ptr)[7] = 0;
asm_io_fdc(pdir_ptr);
if (ioc_needs_fdc) {
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
......@@ -661,8 +660,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
** could dump core on HPMC.
*/
((u8 *) pdir_ptr)[7] = 0;
if (ioc_needs_fdc)
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
asm_io_fdc(pdir_ptr);
WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
......@@ -773,8 +771,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
}
/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
if (ioc_needs_fdc)
asm volatile("sync" : : );
asm_io_sync();
#ifdef ASSERT_PDIR_SANITY
sba_check_pdir(ioc,"Check after sba_map_single()");
......@@ -858,8 +855,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
sba_free_range(ioc, iova, size);
/* If fdc's were issued, force fdc's to be visible now */
if (ioc_needs_fdc)
asm volatile("sync" : : );
asm_io_sync();
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
......@@ -1008,8 +1004,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
if (ioc_needs_fdc)
asm volatile("sync" : : );
asm_io_sync();
#ifdef ASSERT_PDIR_SANITY
if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
......
......@@ -48,9 +48,6 @@ fi
tmp=$(mktemp /tmp/vmlinux-XXX)
trap "rm -f $tmp" 0
# Initial attempt for uncompressed images or objects:
check_vmlinux $img
# That didn't work, so retry after decompression.
try_decompress '\037\213\010' xy gunzip
try_decompress '\3757zXZ\000' abcde unxz
......@@ -60,5 +57,8 @@ try_decompress '\211\114\132' xy 'lzop -d'
try_decompress '\002!L\030' xxx 'lz4 -d'
try_decompress '(\265/\375' xxx unzstd
# Finally check for uncompressed images or objects:
check_vmlinux $img
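# Editor's note (hedged): the uncompressed check moved below the
# decompression attempts because a boot wrapper such as parisc's vmlinuz
# is itself a valid ELF that merely embeds the compressed vmlinux;
# trying decompression first extracts the real kernel instead of
# returning the wrapper.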
# Bail out:
echo "$me: Cannot find vmlinux." >&2