Commit 3eb51486 authored by Linus Torvalds

Merge tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC updates from Vineet Gupta:

 - long-overdue rewrite of do_page_fault

 - refactoring of entry/exit code to utilize the double load/store
   instructions

 - hsdk platform updates

* tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: [plat-hsdk]: Enable AXI DW DMAC in defconfig
  ARC: [plat-hsdk]: enable DW SPI controller
  ARC: hide unused function unw_hdr_alloc
  ARC: [haps] Add Virtio support
  ARCv2: entry: simplify return to Delay Slot via interrupt
  ARC: entry: EV_Trap expects r10 (vs. r9) to have exception cause
  ARCv2: entry: rewrite to enable use of double load/stores LDD/STD
  ARCv2: entry: avoid a branch
  ARCv2: entry: push out the Z flag unclobber from common EXCEPTION_PROLOGUE
  ARCv2: entry: comments about hardware auto-save on taken interrupts
  ARC: mm: do_page_fault refactor #8: release mmap_sem sooner
  ARC: mm: do_page_fault refactor #7: fold the various error handling
  ARC: mm: do_page_fault refactor #6: error handlers to use same pattern
  ARC: mm: do_page_fault refactor #5: scoot no_context to end
  ARC: mm: do_page_fault refactor #4: consolidate retry related logic
  ARC: mm: do_page_fault refactor #3: tidyup vma access permission code
  ARC: mm: do_page_fault refactor #2: remove short lived variable
  ARC: mm: do_page_fault refactor #1: remove label @good_area
parents c309b6f2 24a20b0a
@@ -62,5 +62,35 @@ arcpct0: pct {
#interrupt-cells = <1>;
interrupts = <20>;
};
virtio0: virtio@f0100000 {
compatible = "virtio,mmio";
reg = <0xf0100000 0x2000>;
interrupts = <31>;
};
virtio1: virtio@f0102000 {
compatible = "virtio,mmio";
reg = <0xf0102000 0x2000>;
interrupts = <32>;
};
virtio2: virtio@f0104000 {
compatible = "virtio,mmio";
reg = <0xf0104000 0x2000>;
interrupts = <33>;
};
virtio3: virtio@f0106000 {
compatible = "virtio,mmio";
reg = <0xf0106000 0x2000>;
interrupts = <34>;
};
virtio4: virtio@f0108000 {
compatible = "virtio,mmio";
reg = <0xf0108000 0x2000>;
interrupts = <35>;
};
};
};
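For reference, virtio-mmio devices can also be declared on the kernel command line via virtio_mmio.device=<size>@<baseaddr>:<irq>; the first node above would correspond to something like virtio_mmio.device=8K@0xf0100000:31. The DT route used here is the cleaner fit for HAPS, since the nodes live alongside the rest of the board description.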
@@ -8,6 +8,7 @@
*/
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/reset/snps,hsdk-reset.h>
/ {
@@ -252,6 +253,19 @@ mmc@a000 {
dma-coherent;
};
spi0: spi@20000 {
compatible = "snps,dw-apb-ssi";
reg = <0x20000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <16>;
num-cs = <2>;
reg-io-width = <4>;
clocks = <&input_clk>;
cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
<&creg_gpio 1 GPIO_ACTIVE_LOW>;
};
creg_gpio: gpio@14b0 {
compatible = "snps,creg-gpio-hsdk";
reg = <0x14b0 0x4>;
......
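A quick way to exercise the new controller from userspace is the spidev interface. Note this diff only adds the controller node, so a spidev child node would have to be added separately (hypothetical here), exposing e.g. /dev/spidev0.0; the 0x9f (flash JEDEC-ID) opcode is chosen purely for illustration. A minimal sketch under those assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

int main(void)
{
	/* hypothetical spidev node on bus 0, chip-select 0 */
	int fd = open("/dev/spidev0.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	uint8_t mode = SPI_MODE_0;
	uint32_t speed = 1000000;		/* 1 MHz, arbitrary */
	ioctl(fd, SPI_IOC_WR_MODE, &mode);
	ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);

	uint8_t tx[4] = { 0x9f, 0, 0, 0 };	/* e.g. JEDEC-ID probe */
	uint8_t rx[4] = { 0 };
	struct spi_ioc_transfer xfer = {
		.tx_buf = (unsigned long)tx,
		.rx_buf = (unsigned long)rx,
		.len = sizeof(tx),
	};
	if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0)
		perror("SPI_IOC_MESSAGE");
	else
		printf("rx: %02x %02x %02x\n", rx[1], rx[2], rx[3]);

	close(fd);
	return 0;
}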
@@ -35,10 +35,12 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_BLK_DEV is not set
CONFIG_VIRTIO_BLK=y
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -68,6 +70,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_MMIO=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
......
@@ -46,6 +46,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
CONFIG_SPI_DESIGNWARE=y
CONFIG_SPI_DW_MMIO=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
@@ -66,6 +69,8 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
CONFIG_DMADEVICES=y
CONFIG_DW_AXI_DMAC=y
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
......
......
@@ -195,8 +195,8 @@
PUSHAX CTOP_AUX_EFLAGS
#endif
lr r9, [ecr]
st r9, [sp, PT_event] /* EV_Trap expects r9 to have ECR */
lr r10, [ecr]
st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */
.endm
/*--------------------------------------------------------------
......
@@ -10,6 +10,24 @@
#ifdef __ASSEMBLY__
.macro ST2 e, o, off
#ifdef CONFIG_ARC_HAS_LL64
std \e, [sp, \off]
#else
st \e, [sp, \off]
st \o, [sp, \off+4]
#endif
.endm
.macro LD2 e, o, off
#ifdef CONFIG_ARC_HAS_LL64
ldd \e, [sp, \off]
#else
ld \e, [sp, \off]
ld \o, [sp, \off+4]
#endif
.endm
#define ASM_NL ` /* use '`' to mark new line in macro */
/* annotation for data we want in DCCM - if enabled in .config */
......
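The ST2/LD2 helpers above are the crux of the LDD/STD rework: on cores with CONFIG_ARC_HAS_LL64 they collapse two 32-bit saves/restores of an even/odd register pair into a single 64-bit access, and otherwise degrade to the old two-instruction form. A rough C analogy of the same idea (HAS_LL64 and st2() are illustrative stand-ins, not kernel API):

#include <stdint.h>
#include <string.h>

/* Store an even/odd register pair into two adjacent 32-bit save slots. */
static inline void st2(uint32_t *slot, uint32_t e, uint32_t o)
{
#ifdef HAS_LL64				/* stand-in for CONFIG_ARC_HAS_LL64 */
	/* one 64-bit store, like STD (assumes little-endian slot layout) */
	uint64_t pair = ((uint64_t)o << 32) | e;
	memcpy(slot, &pair, sizeof(pair));
#else
	slot[0] = e;			/* two 32-bit stores, like ST;ST */
	slot[1] = o;
#endif
}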
@@ -55,7 +55,14 @@ int main(void)
DEFINE(PT_r5, offsetof(struct pt_regs, r5));
DEFINE(PT_r6, offsetof(struct pt_regs, r6));
DEFINE(PT_r7, offsetof(struct pt_regs, r7));
DEFINE(PT_r8, offsetof(struct pt_regs, r8));
DEFINE(PT_r10, offsetof(struct pt_regs, r10));
DEFINE(PT_r26, offsetof(struct pt_regs, r26));
DEFINE(PT_ret, offsetof(struct pt_regs, ret));
DEFINE(PT_blink, offsetof(struct pt_regs, blink));
DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end));
DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count));
DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
......
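The new PT_r10/PT_ret/PT_blink/... constants let the rewritten prologue address pt_regs slots symbolically from assembly. asm-offsets.c generates them by compiling DEFINE() invocations and scraping the assembler output; the mechanism (from include/linux/kbuild.h, lightly paraphrased, with pt_regs_example as a stand-in struct) looks like this:

#include <stddef.h>

struct pt_regs_example {		/* stand-in for struct pt_regs */
	unsigned long r10, blink, ret;
};

/* emit "->PT_r10 <value>" into the generated asm; a build-time script
 * then turns each such line into "#define PT_r10 <value>" */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

void emit_offsets(void)
{
	DEFINE(PT_r10, offsetof(struct pt_regs_example, r10));
	DEFINE(PT_blink, offsetof(struct pt_regs_example, blink));
}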
@@ -67,7 +67,7 @@ reserved:
ENTRY(handle_interrupt)
INTERRUPT_PROLOGUE irq
INTERRUPT_PROLOGUE
# irq control APIs local_irq_save/restore/disable/enable fiddle with
# global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio)
@@ -79,7 +79,7 @@ ENTRY(handle_interrupt)
#
# Note this disable is only for consistent book-keeping as further interrupts
# will be disabled anyways even w/o this. Hardware tracks active interrupts
# seperately in AUX_IRQ_ACTIVE.active and will not take new interrupts
# seperately in AUX_IRQ_ACT.active and will not take new interrupts
# unless this one returns (or higher prio becomes pending in 2-prio scheme)
IRQ_DISABLE
@@ -200,17 +200,18 @@ restore_regs:
ld r0, [sp, PT_status32] ; U/K mode at time of entry
lr r10, [AUX_IRQ_ACT]
bmsk r11, r10, 15 ; AUX_IRQ_ACT.ACTIVE
bmsk r11, r10, 15 ; extract AUX_IRQ_ACT.active
breq r11, 0, .Lexcept_ret ; No intr active, ret from Exception
;####### Return from Intr #######
.Lisr_ret:
debug_marker_l1:
; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
btst r0, STATUS_DE_BIT ; Z flag set if bit clear
bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
.Lisr_ret_fast_path:
; Handle special case #1: (Entry via Exception, Return via IRQ)
;
; Exception in U mode, preempted in kernel, Intr taken (K mode), orig
@@ -223,7 +224,7 @@ debug_marker_l1:
bset.nz r11, r11, AUX_IRQ_ACT_BIT_U ; NZ means U
sr r11, [AUX_IRQ_ACT]
INTERRUPT_EPILOGUE irq
INTERRUPT_EPILOGUE
rtie
;####### Return from Exception / pure kernel mode #######
@@ -244,8 +245,8 @@ debug_marker_syscall:
;
; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
;
; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
; Solution is to drop out of interrupt context into pure kernel mode
; and return from pure kernel mode which does right things for delay slot
.Lintr_ret_to_delay_slot:
debug_marker_ds:
@@ -254,48 +255,9 @@ debug_marker_ds:
add r2, r2, 1
st r2, [@intr_to_DE_cnt]
ld r2, [sp, PT_ret]
ld r3, [sp, PT_status32]
; STAT32 for Int return created from scratch
; (No delay dlot, disable Further intr in trampoline)
bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
st r0, [sp, PT_status32]
mov r1, .Lintr_ret_to_delay_slot_2
st r1, [sp, PT_ret]
; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
st r2, [sp, 0]
st r3, [sp, 4]
b .Lisr_ret_fast_path
.Lintr_ret_to_delay_slot_2:
; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
sub sp, sp, SZ_PT_REGS
st r9, [sp, -4]
ld r9, [sp, 0]
sr r9, [eret]
ld r9, [sp, 4]
sr r9, [erstatus]
; restore AUX_USER_SP if returning to U mode
bbit0 r9, STATUS_U_BIT, 1f
ld r9, [sp, PT_sp]
sr r9, [AUX_USER_SP]
1:
ld r9, [sp, 8]
sr r9, [erbta]
ld r9, [sp, -4]
add sp, sp, SZ_PT_REGS
; return from pure kernel mode to delay slot
rtie
; drop out of interrupt context (clear AUX_IRQ_ACT.active)
bmskn r11, r10, 15
sr r11, [AUX_IRQ_ACT]
b .Lexcept_ret
END(ret_from_exception)
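The net effect of the rewrite above: the old path built a fake kernel-mode trampoline frame and took a second RTIE just to get delay-slot state (DE bit, BTA) restored correctly, while the new path simply clears AUX_IRQ_ACT.active and reuses the ordinary exception return, which already handles that state. A toy C model of the new decision (bit position and stub names are illustrative, not kernel API):

#include <stdint.h>

#define STATUS_DE_BIT	6		/* STATUS32.DE: returning to a delay slot */

static unsigned long intr_to_DE_cnt;	/* same counter the asm bumps */

static void exception_epilogue_rtie(void) { /* EXCEPTION_EPILOGUE + rtie */ }
static void interrupt_epilogue_rtie(void) { /* INTERRUPT_EPILOGUE + rtie */ }

static void isr_ret(uint32_t status32, uint32_t aux_irq_act)
{
	if (status32 & (1u << STATUS_DE_BIT)) {
		intr_to_DE_cnt++;
		/* bmskn r11, r10, 15: clear .active, dropping out of
		 * interrupt context (asm then does sr r11, [AUX_IRQ_ACT]) */
		aux_irq_act &= ~0xffffu;
		(void)aux_irq_act;
		exception_epilogue_rtie();	/* restores DE/BTA correctly */
		return;
	}
	interrupt_epilogue_rtie();		/* fast path */
}

int main(void)
{
	isr_ret(1u << STATUS_DE_BIT, 0x1);	/* e.g. one intr active */
	return 0;
}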
@@ -256,7 +256,7 @@ ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
mov r2, r9 ; ECR set into r9 already
mov r2, r10 ; ECR set into r10 already
lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above)
; Exception auto-disables further Intr/exceptions.
......
@@ -232,8 +232,8 @@ ENTRY(EV_Trap)
EXCEPTION_PROLOGUE
;============ TRAP 1 :breakpoints
; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
bmsk.f 0, r9, 7
; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
bmsk.f 0, r10, 7
bnz trap_with_param
;============ TRAP (no param): syscall top level
......
@@ -181,11 +181,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
}
static void *unw_hdr_alloc(unsigned long sz)
{
return kmalloc(sz, GFP_KERNEL);
}
static void init_unwind_table(struct unwind_table *table, const char *name,
const void *core_start, unsigned long core_size,
const void *init_start, unsigned long init_size,
@@ -366,6 +361,10 @@ static void init_unwind_hdr(struct unwind_table *table,
}
#ifdef CONFIG_MODULES
static void *unw_hdr_alloc(unsigned long sz)
{
return kmalloc(sz, GFP_KERNEL);
}
static struct unwind_table *last_table;
......
@@ -63,24 +63,19 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int si_code = SEGV_MAPERR;
int ret;
vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
int sig, si_code = SEGV_MAPERR;
unsigned int write = 0, exec = 0, mask;
vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */
unsigned int flags; /* handle_mm_fault() input */
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if (address >= VMALLOC_START && !user_mode(regs)) {
ret = handle_kernel_vaddr_fault(address);
if (unlikely(ret))
if (unlikely(handle_kernel_vaddr_fault(address)))
goto no_context;
else
return;
@@ -93,143 +88,117 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
if (faulthandler_disabled() || !mm)
goto no_context;
if (regs->ecr_cause & ECR_C_PROTV_STORE) /* ST/EX */
write = 1;
else if ((regs->ecr_vec == ECR_V_PROTV) &&
(regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (write)
flags |= FAULT_FLAG_WRITE;
retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
if (unlikely(address < vma->vm_start)) {
if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
goto bad_area;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
* vm_area is good, now check permissions for this memory access
*/
good_area:
si_code = SEGV_ACCERR;
/* Handle protection violation, execute on heap or stack */
if ((regs->ecr_vec == ECR_V_PROTV) &&
(regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
mask = VM_READ;
if (write)
mask = VM_WRITE;
if (exec)
mask = VM_EXEC;
if (!(vma->vm_flags & mask)) {
si_code = SEGV_ACCERR;
goto bad_area;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
} else {
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(vma, address, flags);
if (fatal_signal_pending(current)) {
/*
* Fault retry nuances
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
/*
* if fault retry, mmap_sem already relinquished by core mm
* so OK to return to user mode (with signal handled first)
* If fault needs to be retried, handle any pending signals
* first (by returning to user mode).
* mmap_sem already relinquished by core mm for RETRY case
*/
if (fault & VM_FAULT_RETRY) {
if (fatal_signal_pending(current)) {
if (!user_mode(regs))
goto no_context;
return;
}
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (likely(!(fault & VM_FAULT_ERROR))) {
/*
* retry state machine
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
/* To avoid updating stats twice for retry case */
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
goto retry;
}
flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
goto retry;
}
/* Fault Handled Gracefully */
up_read(&mm->mmap_sem);
return;
}
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
/* no man's land */
BUG();
bad_area:
up_read(&mm->mmap_sem);
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
* Major/minor page fault accounting
* (in case of retry we only land here once)
*/
bad_area:
up_read(&mm->mmap_sem);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.fault_address = address;
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
if (likely(!(fault & VM_FAULT_ERROR))) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
no_context:
/* Are we prepared to handle this kernel fault?
*
* (The kernel has valid exception-points in the source
* when it accesses user-memory. When it fails in one
* of those points, we find it in a table and do a jump
* to some fixup code that loads an appropriate error
* code)
*/
if (fixup_exception(regs))
/* Normal return path: fault Handled Gracefully */
return;
}
die("Oops", regs, address);
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
if (user_mode(regs)) {
if (fault & VM_FAULT_OOM) {
pagefault_out_of_memory();
return;
}
goto no_context;
if (fault & VM_FAULT_SIGBUS) {
sig = SIGBUS;
si_code = BUS_ADRERR;
}
else {
sig = SIGSEGV;
}
do_sigbus:
up_read(&mm->mmap_sem);
tsk->thread.fault_address = address;
force_sig_fault(sig, si_code, (void __user *)address);
return;
if (!user_mode(regs))
goto no_context;
no_context:
if (fixup_exception(regs))
return;
tsk->thread.fault_address = address;
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
die("Oops", regs, address);
}
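Taken together, the eight refactor steps converge on a single retry loop with one accounting site and one error funnel at the bottom of the function. A condensed, userspace-runnable sketch of just that retry state machine (mm internals stubbed out; only the control flow mirrors the code above):

#include <stdbool.h>
#include <stdio.h>

/* stub fault outcomes, mirroring vm_fault_t bits */
#define VM_FAULT_RETRY	0x1
#define VM_FAULT_ERROR	0x2

static int attempts;

/* stub: pretend the first attempt must be retried, the second succeeds */
static unsigned int handle_mm_fault_stub(bool allow_retry)
{
	return (allow_retry && attempts++ == 0) ? VM_FAULT_RETRY : 0;
}

int main(void)
{
	bool allow_retry = true, tried = false;
	unsigned int fault;

retry:
	/* down_read(&mm->mmap_sem) and find_vma() would happen here */
	fault = handle_mm_fault_stub(allow_retry);

	if (fault & VM_FAULT_RETRY) {
		/* core mm already dropped mmap_sem for the RETRY case */
		if (allow_retry) {
			allow_retry = false;	/* ~FAULT_FLAG_ALLOW_RETRY */
			tried = true;		/* FAULT_FLAG_TRIED */
			goto retry;
		}
	}

	/* up_read(&mm->mmap_sem); single exit for accounting + errors */
	if (!(fault & VM_FAULT_ERROR))
		printf("fault handled, tried=%d\n", tried);
	else
		printf("error path: one place picks SIGSEGV/SIGBUS/OOM\n");
	return 0;
}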
@@ -393,6 +393,17 @@ EV_TLBMissD_fast_ret: ; additional label for VDK OS-kit instrumentation
;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:
#ifdef CONFIG_ISA_ARCV2
; Set Z flag if exception in U mode. Hardware micro-ops do this on any
; taken interrupt/exception, and thus is already the case at the entry
; above, but ensuing code would have already clobbered.
; EXCEPTION_PROLOGUE called in slow path, relies on correct Z flag set
lr r2, [erstatus]
and r2, r2, STATUS_U_MASK
bxor.f 0, r2, STATUS_U_BIT
#endif
; Restore the 4-scratch regs saved by fast path miss handler
TLBMISS_RESTORE_REGS
......
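The three-instruction sequence above is a flag-setting trick: it isolates STATUS32.U, then bxor.f XORs that single bit back out, so Z ends up set exactly when the exception came from user mode, which is what EXCEPTION_PROLOGUE expects. The same computation in C (bit position per arcregs.h; the helper name is made up):

#include <stdbool.h>

#define STATUS_U_BIT	7			/* STATUS32.U bit position */
#define STATUS_U_MASK	(1u << STATUS_U_BIT)

/* lr r2,[erstatus]; and r2,r2,STATUS_U_MASK; bxor.f 0,r2,STATUS_U_BIT */
static bool z_after_bxor(unsigned int erstatus)
{
	unsigned int r2 = erstatus & STATUS_U_MASK;
	return (r2 ^ STATUS_U_MASK) == 0;	/* Z set iff U was set */
}

int main(void) { return !z_after_bxor(STATUS_U_MASK); }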