Commit 1ccfd5ea authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull first batch of s390 updates from Martin Schwidefsky:
 "The most interesting change is that Martin converted s390 to generic
  hardirqs.  Which means that all current architectures have been
  converted and that CONFIG_GENERIC_HARDIRQS can be removed.  Martin
  prepared a patch for that already (see genirq branch), but the best
  time to merge that is probably at the end of the merge window / begin
  of -rc1.

  Another patch converts s390 to software referenced bits instead of
  relying on the reference bit in the storage key.  Therefore s390
  doesn't use storage keys anymore, except for kvm.

  Besides that we have improvements, cleanups and fixes in PCI, DASD and
  all over the place."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (32 commits)
  s390/pci: use virtual memory for iommu bitmap
  s390/cio: fix unlocked access of global bitmap
  s390/pci: update function handle after resume from hibernate
  s390/pci: try harder to modify a function
  s390/pci: split lpf
  s390/hibernate: add early resume function
  s390/pci: add recover sysfs knob
  s390/pci: use claim_resource
  s390/pci/hotplug: convert to be builtin only
  s390/mm: implement software referenced bits
  s390/dasd: fix statistics for recovered requests
  s390/tx: allow program interruption filtering in user space
  s390/pgtable: fix mprotect for single-threaded KVM guests
  s390/time: return with irqs disabled from psw_idle
  s390/kprobes: add support for compare and branch instructions
  s390/switch_to: fix save_access_regs() / restore_access_regs()
  s390/bitops: fix inline assembly constraints
  s390/dasd: enable raw_track_access reads without direct I/O
  s390/mm: introduce ptep_flush_lazy helper
  s390/time: clock comparator revalidation
  ...
parents ea98af13 22459321
@@ -116,6 +116,7 @@ config S390
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_GENERIC_HARDIRQS
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZ4
@@ -445,6 +446,16 @@ config PCI_NR_FUNCTIONS
 	  This allows you to specify the maximum number of PCI functions which
 	  this kernel will support.
 
+config PCI_NR_MSI
+	int "Maximum number of MSI interrupts (64-32768)"
+	range 64 32768
+	default "256"
+	help
+	  This defines the number of virtual interrupts the kernel will
+	  provide for MSI interrupts. If you configure your system to have
+	  too few drivers will fail to allocate MSI interrupts for all
+	  PCI devices.
+
 source "drivers/pci/Kconfig"
 source "drivers/pci/pcie/Kconfig"
 source "drivers/pci/hotplug/Kconfig"
...
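A quick sanity check on the default (my arithmetic, not text from the commit): with CONFIG_PCI_NR_MSI at its default of 256, the NR_IRQS definition added to asm/irq.h further down in this diff works out to NR_IRQS_BASE + CONFIG_PCI_NR_MSI = 4 + 256 = 260 virtual interrupts.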
@@ -9,6 +9,8 @@
 #ifndef _ASM_S390_AIRQ_H
 #define _ASM_S390_AIRQ_H
 
+#include <linux/bit_spinlock.h>
+
 struct airq_struct {
 	struct hlist_node list;		/* Handler queueing. */
 	void (*handler)(struct airq_struct *);	/* Thin-interrupt handler */
@@ -23,4 +25,69 @@ struct airq_struct {
 int register_adapter_interrupt(struct airq_struct *airq);
 void unregister_adapter_interrupt(struct airq_struct *airq);
 
+/* Adapter interrupt bit vector */
+struct airq_iv {
+	unsigned long *vector;	/* Adapter interrupt bit vector */
+	unsigned long *avail;	/* Allocation bit mask for the bit vector */
+	unsigned long *bitlock;	/* Lock bit mask for the bit vector */
+	unsigned long *ptr;	/* Pointer associated with each bit */
+	unsigned int *data;	/* 32 bit value associated with each bit */
+	unsigned long bits;	/* Number of bits in the vector */
+	unsigned long end;	/* Number of highest allocated bit + 1 */
+	spinlock_t lock;	/* Lock to protect alloc & free */
+};
+
+#define AIRQ_IV_ALLOC	1	/* Use an allocation bit mask */
+#define AIRQ_IV_BITLOCK	2	/* Allocate the lock bit mask */
+#define AIRQ_IV_PTR	4	/* Allocate the ptr array */
+#define AIRQ_IV_DATA	8	/* Allocate the data array */
+
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
+void airq_iv_release(struct airq_iv *iv);
+unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
+void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+			   unsigned long end);
+
+static inline unsigned long airq_iv_end(struct airq_iv *iv)
+{
+	return iv->end;
+}
+
+static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit)
+{
+	const unsigned long be_to_le = BITS_PER_LONG - 1;
+	bit_spin_lock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit)
+{
+	const unsigned long be_to_le = BITS_PER_LONG - 1;
+	bit_spin_unlock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit,
+				    unsigned int data)
+{
+	iv->data[bit] = data;
+}
+
+static inline unsigned int airq_iv_get_data(struct airq_iv *iv,
+					    unsigned long bit)
+{
+	return iv->data[bit];
+}
+
+static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit,
+				   unsigned long ptr)
+{
+	iv->ptr[bit] = ptr;
+}
+
+static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv,
+					    unsigned long bit)
+{
+	return iv->ptr[bit];
+}
+
 #endif /* _ASM_S390_AIRQ_H */
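To make the new bit-vector API above easier to follow, here is a minimal, hypothetical usage sketch in C (function names and the choice of 256 bits are mine, not from the commit; it assumes the usual -1UL "no bit" return convention of airq_iv_alloc_bit() and airq_iv_scan(), and is loosely modeled on how an adapter-interrupt consumer would walk its vector):

    /* Hypothetical consumer of the airq_iv API declared above;
     * error handling trimmed to the essentials. */
    static struct airq_iv *iv;

    static int example_setup(void)
    {
            unsigned long bit;

            /* one bit per vector, with an allocation mask and a data array */
            iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_DATA);
            if (!iv)
                    return -ENOMEM;
            bit = airq_iv_alloc_bit(iv);
            if (bit == -1UL)
                    return -ENOSPC;
            airq_iv_set_data(iv, bit, 42);  /* e.g. a virtual irq number */
            return 0;
    }

    static void example_thin_handler(struct airq_struct *airq)
    {
            unsigned long bit;

            /* hand every pending bit to its registered virtual irq */
            for (bit = airq_iv_scan(iv, 0, airq_iv_end(iv)); bit != -1UL;
                 bit = airq_iv_scan(iv, bit + 1, airq_iv_end(iv)))
                    generic_handle_irq(airq_iv_get_data(iv, bit));
    }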
@@ -216,7 +216,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
 	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
 	asm volatile(
 		"	oc	%O0(1,%R0),%1"
-		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
+		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
 }
 
 static inline void
@@ -244,7 +244,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
 	asm volatile(
 		"	nc	%O0(1,%R0),%1"
-		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
+		: "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
 }
 
 static inline void
@@ -271,7 +271,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
 	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
 	asm volatile(
 		"	xc	%O0(1,%R0),%1"
-		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
+		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
 }
 
 static inline void
@@ -301,7 +301,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
 	ch = *(unsigned char *) addr;
 	asm volatile(
 		"	oc	%O0(1,%R0),%1"
-		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
 		: "cc", "memory");
 	return (ch >> (nr & 7)) & 1;
 }
@@ -320,7 +320,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
 	ch = *(unsigned char *) addr;
 	asm volatile(
 		"	nc	%O0(1,%R0),%1"
-		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
+		: "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
 		: "cc", "memory");
 	return (ch >> (nr & 7)) & 1;
 }
@@ -339,7 +339,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
 	ch = *(unsigned char *) addr;
 	asm volatile(
 		"	xc	%O0(1,%R0),%1"
-		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
 		: "cc", "memory");
 	return (ch >> (nr & 7)) & 1;
 }
...
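This hunk is the "s390/bitops: fix inline assembly constraints" commit from the shortlog: oc/nc/xc read the target byte and write it back, so the operand must be declared read-write ("+Q"), not write-only ("=Q"), or GCC may treat the previous contents as dead. A minimal user-space illustration of the same pitfall, using x86 rather than s390 (the instruction choice and names are mine, not from the commit):

    #include <stdio.h>

    static void set_mask_rmw(unsigned char *byte, unsigned char mask)
    {
            /* "orb" ORs mask into the byte in memory: a read-modify-write.
             * "+m" tells GCC the old value is consumed; with "=m" the
             * compiler could legally drop or reorder the store that
             * initialized *byte. */
            asm volatile("orb %1,%0"
                         : "+m" (*byte)
                         : "iq" (mask)
                         : "cc");
    }

    int main(void)
    {
            unsigned char b = 0x01;
            set_mask_rmw(&b, 0x80);
            printf("0x%02x\n", b);  /* prints 0x81 */
            return 0;
    }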
@@ -296,6 +296,7 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
 	return 0;
 }
 
+void channel_subsystem_reinit(void);
 extern void css_schedule_reprobe(void);
 extern void reipl_ccw_dev(struct ccw_dev_id *id);
...
@@ -20,4 +20,9 @@
 
 #define HARDIRQ_BITS	8
 
+static inline void ack_bad_irq(unsigned int irq)
+{
+	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+
 #endif /* __ASM_HARDIRQ_H */
@@ -17,6 +17,9 @@
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep);
 
 /*
  * If the arch doesn't supply something else, assume that hugepage
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
 int arch_prepare_hugepage(struct page *page);
 void arch_release_hugepage(struct page *page);
 
-static inline pte_t huge_pte_wrprotect(pte_t pte)
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+				  pte_t *ptep)
 {
-	pte_val(pte) |= _PAGE_RO;
-	return pte;
+	pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
 }
 
-static inline int huge_pte_none(pte_t pte)
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long address, pte_t *ptep)
 {
-	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
-	       !(pte_val(pte) & _SEGMENT_ENTRY_RO);
+	huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
 }
 
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
 {
-	pte_t pte = *ptep;
-	unsigned long mask;
-
-	if (!MACHINE_HAS_HPAGE) {
-		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
-		if (ptep) {
-			mask = pte_val(pte) &
-				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-			pte = pte_mkhuge(*ptep);
-			pte_val(pte) |= mask;
-		}
+	int changed = !pte_same(huge_ptep_get(ptep), pte);
+	if (changed) {
+		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
 	}
-	return pte;
+	return changed;
 }
 
-static inline void __pmd_csp(pmd_t *pmdp)
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
 {
-	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
-	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
-					       _SEGMENT_ENTRY_INV;
-	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
-	asm volatile(
-		"	csp %1,%3"
-		: "=m" (*pmdp)
-		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
 }
 
-static inline void huge_ptep_invalidate(struct mm_struct *mm,
-					unsigned long address, pte_t *ptep)
-{
-	pmd_t *pmdp = (pmd_t *) ptep;
-
-	if (MACHINE_HAS_IDTE)
-		__pmd_idte(address, pmdp);
-	else
-		__pmd_csp(pmdp);
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-					    unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = huge_ptep_get(ptep);
-
-	huge_ptep_invalidate(mm, addr, ptep);
-	return pte;
-}
-
-#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
-({									    \
-	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
-	if (__changed) {						    \
-		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	    \
-		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
-	}								    \
-	__changed;							    \
-})
-
-#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
-({									\
-	pte_t __pte = huge_ptep_get(__ptep);				\
-	if (huge_pte_write(__pte)) {					\
-		huge_ptep_invalidate(__mm, __addr, __ptep);		\
-		set_huge_pte_at(__mm, __addr, __ptep,			\
-				huge_pte_wrprotect(__pte));		\
-	}								\
-})
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
-					 unsigned long address, pte_t *ptep)
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
 {
-	huge_ptep_invalidate(vma->vm_mm, address, ptep);
+	return mk_pte(page, pgprot);
 }
 
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+static inline int huge_pte_none(pte_t pte)
 {
-	pte_t pte;
-	pmd_t pmd;
-
-	pmd = mk_pmd_phys(page_to_phys(page), pgprot);
-	pte_val(pte) = pmd_val(pmd);
-	return pte;
+	return pte_none(pte);
 }
 
 static inline int huge_pte_write(pte_t pte)
 {
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	return pmd_write(pmd);
+	return pte_write(pte);
 }
 
 static inline int huge_pte_dirty(pte_t pte)
 {
-	/* No dirty bit in the segment table entry. */
-	return 0;
+	return pte_dirty(pte);
 }
 
 static inline pte_t huge_pte_mkwrite(pte_t pte)
 {
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
-	return pte;
+	return pte_mkwrite(pte);
 }
 
 static inline pte_t huge_pte_mkdirty(pte_t pte)
 {
-	/* No dirty bit in the segment table entry. */
-	return pte;
+	return pte_mkdirty(pte);
 }
 
-static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t huge_pte_wrprotect(pte_t pte)
 {
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
-	return pte;
+	return pte_wrprotect(pte);
 }
 
-static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
-				  pte_t *ptep)
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pmd_clear((pmd_t *) ptep);
+	return pte_modify(pte, newprot);
 }
 
 #endif /* _ASM_S390_HUGETLB_H */
@@ -4,19 +4,8 @@
 #include <linux/msi.h>
 #include <linux/pci.h>
 
-static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
-{
-	return __irq_get_msi_desc(irq);
-}
-
-/* Must be called with msi map lock held */
-static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
-{
-	if (!msi)
-		return -EINVAL;
-
-	msi->irq = irq;
-	return 0;
-}
+void __init init_airq_interrupts(void);
+void __init init_cio_interrupts(void);
+void __init init_ext_interrupts(void);
 
 #endif
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
+#define EXT_INTERRUPT	1
+#define IO_INTERRUPT	2
+#define THIN_INTERRUPT	3
+
+#define NR_IRQS_BASE	4
+
+#ifdef CONFIG_PCI_NR_MSI
+# define NR_IRQS	(NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
+#else
+# define NR_IRQS	NR_IRQS_BASE
+#endif
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ		0
+
+#ifndef __ASSEMBLY__
+
 #include <linux/hardirq.h>
 #include <linux/percpu.h>
 #include <linux/cache.h>
 #include <linux/types.h>
 
-enum interruption_main_class {
-	EXTERNAL_INTERRUPT,
-	IO_INTERRUPT,
-	NR_IRQS
-};
-
 enum interruption_class {
 	IRQEXT_CLK,
 	IRQEXT_EXC,
@@ -72,14 +83,8 @@ void service_subclass_irq_unregister(void);
 void measurement_alert_subclass_register(void);
 void measurement_alert_subclass_unregister(void);
 
-#ifdef CONFIG_LOCKDEP
-# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
-# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
-						disable_irq_nosync(irq)
-# define disable_irq_lockdep(irq)		disable_irq(irq)
-# define enable_irq_lockdep(irq)		enable_irq(irq)
-# define enable_irq_lockdep_irqrestore(irq, flags) \
-						enable_irq(irq)
-#endif
+#define irq_canonicalize(irq)  (irq)
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IRQ_H */
@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	atomic_inc(&next->context.attach_count);
 	/* Check for TLBs not flushed yet */
-	if (next->context.flush_mm)
-		__tlb_flush_mm(next);
+	__tlb_flush_mm_lazy(next);
 }
 
 #define enter_lazy_tlb(mm,tsk)	do { } while (0)
...
@@ -32,16 +32,6 @@
 
 void storage_key_init_range(unsigned long start, unsigned long end);
 
-static inline unsigned long pfmf(unsigned long function, unsigned long address)
-{
-	asm volatile(
-		"	.insn	rre,0xb9af0000,%[function],%[address]"
-		: [address] "+a" (address)
-		: [function] "d" (function)
-		: "memory");
-	return address;
-}
-
 static inline void clear_page(void *page)
 {
 	register unsigned long reg1 asm ("1") = 0;
@@ -150,15 +140,6 @@ static inline int page_reset_referenced(unsigned long addr)
 #define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
 #define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
 
-/*
- * Test and clear referenced bit in storage key.
- */
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-static inline int page_test_and_clear_young(unsigned long pfn)
-{
-	return page_reset_referenced(pfn << PAGE_SHIFT);
-}
-
 struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
...
@@ -6,6 +6,7 @@
 /* must be set before including pci_clp.h */
 #define PCI_BAR_COUNT	6
 
+#include <linux/pci.h>
 #include <asm-generic/pci.h>
 #include <asm-generic/pci-dma-compat.h>
 #include <asm/pci_clp.h>
@@ -53,14 +54,9 @@ struct zpci_fmb {
 	atomic64_t unmapped_pages;
 } __packed __aligned(16);
 
-struct msi_map {
-	unsigned long irq;
-	struct msi_desc *msi;
-	struct hlist_node msi_chain;
-};
-
-#define ZPCI_NR_MSI_VECS	64
-#define ZPCI_MSI_MASK		(ZPCI_NR_MSI_VECS - 1)
+#define ZPCI_MSI_VEC_BITS	11
+#define ZPCI_MSI_VEC_MAX	(1 << ZPCI_MSI_VEC_BITS)
+#define ZPCI_MSI_VEC_MASK	(ZPCI_MSI_VEC_MAX - 1)
 
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
@@ -91,8 +87,7 @@ struct zpci_dev {
 
 	/* IRQ stuff */
 	u64		msi_addr;	/* MSI address */
-	struct zdev_irq_map *irq_map;
-	struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
+	struct airq_iv *aibv;		/* adapter interrupt bit vector */
 	unsigned int	aisb;		/* number of the summary bit */
 
 	/* DMA stuff */
@@ -122,11 +117,6 @@ struct zpci_dev {
 	struct dentry	*debugfs_perf;
 };
 
-struct pci_hp_callback_ops {
-	int (*create_slot) (struct zpci_dev *zdev);
-	void (*remove_slot) (struct zpci_dev *zdev);
-};
-
 static inline bool zdev_enabled(struct zpci_dev *zdev)
 {
 	return (zdev->fh & (1UL << 31)) ? true : false;
@@ -146,32 +136,38 @@ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
 
 /* CLP */
-int clp_find_pci_devices(void);
+int clp_scan_pci_devices(void);
+int clp_rescan_pci_devices(void);
+int clp_rescan_pci_devices_simple(void);
 int clp_add_pci_device(u32, u32, int);
 int clp_enable_fh(struct zpci_dev *, u8);
 int clp_disable_fh(struct zpci_dev *);
 
-/* MSI */
-struct msi_desc *__irq_get_msi_desc(unsigned int);
-int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
-int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
-void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
-int zpci_msihash_init(void);
-void zpci_msihash_exit(void);
-
 #ifdef CONFIG_PCI
 /* Error handling and recovery */
 void zpci_event_error(void *);
 void zpci_event_availability(void *);
+void zpci_rescan(void);
 #else /* CONFIG_PCI */
 static inline void zpci_event_error(void *e) {}
 static inline void zpci_event_availability(void *e) {}
+static inline void zpci_rescan(void) {}
 #endif /* CONFIG_PCI */
 
+#ifdef CONFIG_HOTPLUG_PCI_S390
+int zpci_init_slot(struct zpci_dev *);
+void zpci_exit_slot(struct zpci_dev *);
+#else /* CONFIG_HOTPLUG_PCI_S390 */
+static inline int zpci_init_slot(struct zpci_dev *zdev)
+{
+	return 0;
+}
+static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
+#endif /* CONFIG_HOTPLUG_PCI_S390 */
+
 /* Helpers */
 struct zpci_dev *get_zdev(struct pci_dev *);
 struct zpci_dev *get_zdev_by_fid(u32);
-bool zpci_fid_present(u32);
 
 /* sysfs */
 int zpci_sysfs_add_device(struct device *);
@@ -181,14 +177,6 @@ void zpci_sysfs_remove_device(struct device *);
 int zpci_dma_init(void);
 void zpci_dma_exit(void);
 
-/* Hotplug */
-extern struct mutex zpci_list_lock;
-extern struct list_head zpci_list;
-extern unsigned int s390_pci_probe;
-
-void zpci_register_hp_ops(struct pci_hp_callback_ops *);
-void zpci_deregister_hp_ops(void);
-
 /* FMB */
 int zpci_fmb_enable_device(struct zpci_dev *);
 int zpci_fmb_disable_device(struct zpci_dev *);
...
@@ -79,11 +79,11 @@ struct zpci_fib {
 } __packed;
 
-int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
-int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
-int s390pci_load(u64 *data, u64 req, u64 offset);
-int s390pci_store(u64 data, u64 req, u64 offset);
-int s390pci_store_block(const u64 *data, u64 req, u64 offset);
-void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_mod_fc(u64 req, struct zpci_fib *fib);
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+int zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
 
 #endif
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr)	\
 	u64 data;							\
 	int rc;								\
 									\
-	rc = s390pci_load(&data, req, ZPCI_OFFSET(addr));		\
+	rc = zpci_load(&data, req, ZPCI_OFFSET(addr));			\
 	if (rc)								\
 		data = -1ULL;						\
 	return (RETTYPE) data;						\
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val,			\
 	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);	\
 	u64 data = (VALTYPE) val;					\
 									\
-	s390pci_store(data, req, ZPCI_OFFSET(addr));			\
+	zpci_store(data, req, ZPCI_OFFSET(addr));			\
 }
 
 zpci_read(8, u64)
@@ -83,7 +83,7 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
 		val = 0;		/* let FW report error */
 		break;
 	}
-	return s390pci_store(val, req, offset);
+	return zpci_store(val, req, offset);
 }
 
 static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
@@ -91,7 +91,7 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
 	u64 data;
 	int cc;
 
-	cc = s390pci_load(&data, req, offset);
+	cc = zpci_load(&data, req, offset);
 	if (cc)
 		goto out;
@@ -115,7 +115,7 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
 static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
 {
-	return s390pci_store_block(data, req, offset);
+	return zpci_store_block(data, req, offset);
 }
 
 static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
...
This diff is collapsed.

+#ifndef _ASM_S390_SERIAL_H
+#define _ASM_S390_SERIAL_H
+
+#define BASE_BAUD 0
+
+#endif /* _ASM_S390_SERIAL_H */
@@ -8,6 +8,7 @@
 #define __ASM_SWITCH_TO_H
 
 #include <linux/thread_info.h>
+#include <asm/ptrace.h>
 
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_cr_regs(struct task_struct *task);
@@ -68,12 +69,16 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
 
 static inline void save_access_regs(unsigned int *acrs)
 {
-	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
+	typedef struct { int _[NUM_ACRS]; } acrstype;
+
+	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
 }
 
 static inline void restore_access_regs(unsigned int *acrs)
 {
-	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
+	typedef struct { int _[NUM_ACRS]; } acrstype;
+
+	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}
 
 #define switch_to(prev,next,last) do {					\
...
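The save_access_regs()/restore_access_regs() fix above works because the C type of a "Q" memory operand tells GCC how much memory the asm accesses: *acrs is a single unsigned int, so the old constraint advertised a 4-byte access even though stam/lam touch all 16 access registers (64 bytes); the acrstype cast widens the operand to the whole array. A user-space sketch of the same idiom on x86 (all names here are illustrative, not from the commit):

    typedef struct { int _[16]; } wordblock;   /* 16 * 4 = 64 bytes */

    static void zero16(int *dst)
    {
            int cnt = 16, fill = 0;

            /* "rep stosl" writes cnt 4-byte words at dst; typing the memory
             * operand as wordblock makes GCC treat all 64 bytes as written,
             * not just dst[0]. */
            asm volatile("rep stosl"
                         : "=m" (*(wordblock *)dst), "+D" (dst), "+c" (cnt)
                         : "a" (fill));
    }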
@@ -63,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+	__tlb_flush_mm_lazy(tlb->mm);
 	tlb_table_flush(tlb);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
-	tlb_table_flush(tlb);
+	tlb_flush_mmu(tlb);
 }
 
 /*
...
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 		__tlb_flush_full(mm);
 }
 
-static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
+static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
 	if (mm->context.flush_mm) {
 		__tlb_flush_mm(mm);
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	__tlb_flush_mm_cond(mm);
+	__tlb_flush_mm_lazy(mm);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	__tlb_flush_mm_cond(vma->vm_mm);
+	__tlb_flush_mm_lazy(vma->vm_mm);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
...
@@ -18,6 +18,7 @@
 #include <asm/unistd.h>
 #include <asm/page.h>
 #include <asm/sigp.h>
+#include <asm/irq.h>
 
 __PT_R0      =	__PT_GPRS
 __PT_R1      =	__PT_GPRS + 4
@@ -435,6 +436,11 @@ io_skip:
io_loop:
 	l	%r1,BASED(.Ldo_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
+	lhi	%r3,IO_INTERRUPT
+	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
+	jz	io_call
+	lhi	%r3,THIN_INTERRUPT
+io_call:
 	basr	%r14,%r1		# call do_IRQ
 	tm	__LC_MACHINE_FLAGS+2,0x10	# MACHINE_FLAG_LPAR
 	jz	io_return
@@ -584,9 +590,10 @@ ext_skip:
 	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
 	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	TRACE_IRQS_OFF
+	l	%r1,BASED(.Ldo_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_extint)
-	basr	%r14,%r1		# call do_extint
+	lhi	%r3,EXT_INTERRUPT
+	basr	%r14,%r1		# call do_IRQ
 	j	io_return
 
/*
@@ -879,13 +886,13 @@ cleanup_idle:
 	stm	%r9,%r10,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 	# prepare return psw
-	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
+	n	%r8,BASED(cleanup_idle_wait)	# clear irq & wait state bits
 	l	%r9,24(%r11)		# return from psw_idle
 	br	%r14
cleanup_idle_insn:
 	.long	psw_idle_lpsw + 0x80000000
cleanup_idle_wait:
-	.long	0xfffdffff
+	.long	0xfcfdffff
 
/*
 * Integer constants
@@ -902,7 +909,6 @@ cleanup_idle_wait:
 .Ldo_machine_check:	.long	s390_do_machine_check
 .Lhandle_mcck:		.long	s390_handle_mcck
 .Ldo_IRQ:		.long	do_IRQ
-.Ldo_extint:		.long	do_extint
 .Ldo_signal:		.long	do_signal
 .Ldo_notify_resume:	.long	do_notify_resume
 .Ldo_per_trap:		.long	do_per_trap
...
@@ -19,6 +19,7 @@
 #include <asm/unistd.h>
 #include <asm/page.h>
 #include <asm/sigp.h>
+#include <asm/irq.h>
 
 __PT_R0      =	__PT_GPRS
 __PT_R1      =	__PT_GPRS + 8
@@ -468,6 +469,11 @@ io_skip:
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
io_loop:
 	lgr	%r2,%r11		# pass pointer to pt_regs
+	lghi	%r3,IO_INTERRUPT
+	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
+	jz	io_call
+	lghi	%r3,THIN_INTERRUPT
+io_call:
 	brasl	%r14,do_IRQ
 	tm	__LC_MACHINE_FLAGS+6,0x10	# MACHINE_FLAG_LPAR
 	jz	io_return
@@ -623,7 +629,8 @@ ext_skip:
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r11		# pass pointer to pt_regs
-	brasl	%r14,do_extint
+	lghi	%r3,EXT_INTERRUPT
+	brasl	%r14,do_IRQ
 	j	io_return
 
/*
@@ -922,7 +929,7 @@ cleanup_idle:
 	stg	%r9,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 	# prepare return psw
-	nihh	%r8,0xfffd		# clear wait state bit
+	nihh	%r8,0xfcfd		# clear irq & wait state bits
 	lg	%r9,48(%r11)		# return from psw_idle
 	br	%r14
cleanup_idle_insn:
...
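To unpack the two idle-cleanup constants (my reading of the z/Architecture PSW layout, not text from the commit): the first byte of the PSW mask carries the I/O interruption mask in bit 6 and the external interruption mask in bit 7, and bit 14 is the wait bit. The old 31-bit constant 0xfffdffff (second byte 0xfd = 1111 1101) only cleared bit 14; the new 0xfcfdffff (first byte 0xfc = 1111 1100) additionally clears bits 6 and 7, and nihh %r8,0xfcfd performs the identical AND on the top halfword of the 64-bit PSW mask. This is the "s390/time: return with irqs disabled from psw_idle" change from the shortlog.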
@@ -22,6 +22,7 @@
 #include <asm/cputime.h>
 #include <asm/lowcore.h>
 #include <asm/irq.h>
+#include <asm/hw_irq.h>
 #include "entry.h"
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -42,9 +43,10 @@ struct irq_class {
 * Since the external and I/O interrupt fields are already sums we would end
 * up with having a sum which accounts each interrupt twice.
 */
-static const struct irq_class irqclass_main_desc[NR_IRQS] = {
-	[EXTERNAL_INTERRUPT] = {.name = "EXT"},
-	[IO_INTERRUPT]	     = {.name = "I/O"}
+static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
+	[EXT_INTERRUPT]  = {.name = "EXT"},
+	[IO_INTERRUPT]   = {.name = "I/O"},
+	[THIN_INTERRUPT] = {.name = "AIO"},
 };
 
 /*
@@ -86,6 +88,28 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
 	[CPU_RST]    = {.name = "RST", .desc = "[CPU] CPU Restart"},
 };
 
+void __init init_IRQ(void)
+{
+	irq_reserve_irqs(0, THIN_INTERRUPT);
+	init_cio_interrupts();
+	init_airq_interrupts();
+	init_ext_interrupts();
+}
+
+void do_IRQ(struct pt_regs *regs, int irq)
+{
+	struct pt_regs *old_regs;
+
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+		/* Serve timer interrupts first. */
+		clock_comparator_work();
+	generic_handle_irq(irq);
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+
 /*
 * show_interrupts is needed by /proc/interrupts.
 */
@@ -100,27 +124,36 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(cpu)
 			seq_printf(p, "CPU%d ", cpu);
 		seq_putc(p, '\n');
+		goto out;
 	}
 	if (irq < NR_IRQS) {
+		if (irq >= NR_IRQS_BASE)
+			goto out;
 		seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 		seq_putc(p, '\n');
-		goto skip_arch_irqs;
+		goto out;
 	}
 	for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
 		seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
+			seq_printf(p, "%10u ",
+				   per_cpu(irq_stat, cpu).irqs[irq]);
 		if (irqclass_sub_desc[irq].desc)
 			seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
 		seq_putc(p, '\n');
 	}
-skip_arch_irqs:
+out:
 	put_online_cpus();
 	return 0;
 }
 
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	return 0;
+}
+
 /*
 * Switch to the asynchronous interrupt stack for softirq execution.
 */
@@ -159,14 +192,6 @@ asmlinkage void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_PROC_FS
-void init_irq_proc(void)
-{
-	if (proc_mkdir("irq", NULL))
-		create_prof_cpu_mask();
-}
-#endif
-
 /*
 * ext_int_hash[index] is the list head for all external interrupts that hash
 * to this index.
@@ -183,14 +208,6 @@ struct ext_int_info {
 /* ext_int_hash_lock protects the handler lists for external interrupts */
 DEFINE_SPINLOCK(ext_int_hash_lock);
 
-static void __init init_external_interrupts(void)
-{
-	int idx;
-
-	for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
-		INIT_LIST_HEAD(&ext_int_hash[idx]);
-}
-
 static inline int ext_hash(u16 code)
 {
 	return (code + (code >> 9)) & 0xff;
@@ -234,20 +251,13 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
 }
 EXPORT_SYMBOL(unregister_external_interrupt);
 
-void __irq_entry do_extint(struct pt_regs *regs)
+static irqreturn_t do_ext_interrupt(int irq, void *dummy)
 {
+	struct pt_regs *regs = get_irq_regs();
 	struct ext_code ext_code;
-	struct pt_regs *old_regs;
 	struct ext_int_info *p;
 	int index;
 
-	old_regs = set_irq_regs(regs);
-	irq_enter();
-	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
-		/* Serve timer interrupts first. */
-		clock_comparator_work();
-	}
-	kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
 	ext_code = *(struct ext_code *) &regs->int_code;
 	if (ext_code.code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
@@ -259,13 +269,25 @@ void __irq_entry do_extint(struct pt_regs *regs)
 			p->handler(ext_code, regs->int_parm,
 				   regs->int_parm_long);
 	rcu_read_unlock();
-	irq_exit();
-	set_irq_regs(old_regs);
+
+	return IRQ_HANDLED;
 }
 
-void __init init_IRQ(void)
+static struct irqaction external_interrupt = {
+	.name	 = "EXT",
+	.handler = do_ext_interrupt,
+};
+
+void __init init_ext_interrupts(void)
 {
-	init_external_interrupts();
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
+		INIT_LIST_HEAD(&ext_int_hash[idx]);
+
+	irq_set_chip_and_handler(EXT_INTERRUPT,
+				 &dummy_irq_chip, handle_percpu_irq);
+	setup_irq(EXT_INTERRUPT, &external_interrupt);
 }
 
 static DEFINE_SPINLOCK(sc_irq_lock);
@@ -313,69 +335,3 @@ void measurement_alert_subclass_unregister(void)
 	spin_unlock(&ma_subclass_lock);
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
-
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
-	/*
-	 * Not needed, the handler is protected by a lock and IRQs that occur
-	 * after the handler is deleted are just NOPs.
-	 */
-}
-EXPORT_SYMBOL_GPL(synchronize_irq);
-#endif
-
-#ifndef CONFIG_PCI
-
-/* Only PCI devices have dynamically-defined IRQ handlers */
-
-int request_irq(unsigned int irq, irq_handler_t handler,
-		unsigned long irqflags, const char *devname, void *dev_id)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
-	WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(free_irq);
-
-void enable_irq(unsigned int irq)
-{
-	WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(enable_irq);
-
-void disable_irq(unsigned int irq)
-{
-	WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(disable_irq);
-
-#endif /* !CONFIG_PCI */
-
-void disable_irq_nosync(unsigned int irq)
-{
-	disable_irq(irq);
-}
-EXPORT_SYMBOL_GPL(disable_irq_nosync);
-
-unsigned long probe_irq_on(void)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_on);
-
-int probe_irq_off(unsigned long val)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_off);
-
-unsigned int probe_irq_mask(unsigned long val)
-{
-	return val;
-}
-EXPORT_SYMBOL_GPL(probe_irq_mask);
@@ -105,14 +105,31 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 		fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xeb:
-		if ((insn[2] & 0xff) == 0x44 ||	/* bxhg  */
-		    (insn[2] & 0xff) == 0x45)	/* bxleg */
+		switch (insn[2] & 0xff) {
+		case 0x44: /* bxhg  */
+		case 0x45: /* bxleg */
 			fixup = FIXUP_BRANCH_NOT_TAKEN;
+			break;
+		}
 		break;
 	case 0xe3:	/* bctg	*/
 		if ((insn[2] & 0xff) == 0x46)
 			fixup = FIXUP_BRANCH_NOT_TAKEN;
 		break;
+	case 0xec:
+		switch (insn[2] & 0xff) {
+		case 0xe5: /* clgrb */
+		case 0xe6: /* cgrb  */
+		case 0xf6: /* crb   */
+		case 0xf7: /* clrb  */
+		case 0xfc: /* cgib  */
+		case 0xfd: /* cglib */
+		case 0xfe: /* cib   */
+		case 0xff: /* clib  */
+			fixup = FIXUP_BRANCH_NOT_TAKEN;
+			break;
+		}
+		break;
 	}
 	return fixup;
 }
...
@@ -214,10 +214,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
 		: "0", "cc");
 #endif
 	/* Revalidate clock comparator register */
-	if (S390_lowcore.clock_comparator == -1)
-		set_clock_comparator(S390_lowcore.mcck_clock);
-	else
-		set_clock_comparator(S390_lowcore.clock_comparator);
+	set_clock_comparator(S390_lowcore.clock_comparator);
 	/* Check if old PSW is valid */
 	if (!mci->wp)
 		/*
...
@@ -71,6 +71,7 @@ void arch_cpu_idle(void)
 	}
 	/* Halt the cpu and keep track of cpu time accounting. */
 	vtime_stop_cpu();
+	local_irq_enable();
 }
 
 void arch_cpu_idle_exit(void)
...
@@ -60,11 +60,11 @@ void update_cr_regs(struct task_struct *task)
 
 		__ctl_store(cr, 0, 2);
 		cr_new[1] = cr[1];
-		/* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
+		/* Set or clear transaction execution TXC bit 8. */
 		if (task->thread.per_flags & PER_FLAG_NO_TE)
-			cr_new[0] = cr[0] & ~(3UL << 54);
+			cr_new[0] = cr[0] & ~(1UL << 55);
 		else
-			cr_new[0] = cr[0] | (3UL << 54);
+			cr_new[0] = cr[0] | (1UL << 55);
 		/* Set or clear transaction execution TDC bits 62 and 63. */
 		cr_new[2] = cr[2] & ~3UL;
 		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
@@ -1299,7 +1299,7 @@ int regs_query_register_offset(const char *name)
 
 	if (!name || *name != 'r')
 		return -EINVAL;
-	if (strict_strtoul(name + 1, 10, &offset))
+	if (kstrtoul(name + 1, 10, &offset))
 		return -EINVAL;
 	if (offset >= NUM_GPRS)
 		return -EINVAL;
...
@@ -10,6 +10,9 @@
 #include <linux/suspend.h>
 #include <linux/mm.h>
 #include <asm/ctl_reg.h>
+#include <asm/ipl.h>
+#include <asm/cio.h>
+#include <asm/pci.h>
 
 /*
 * References to section boundaries
@@ -211,3 +214,11 @@ void restore_processor_state(void)
 	__ctl_set_bit(0,28);
 	local_mcck_enable();
 }
+
+/* Called at the end of swsusp_arch_resume */
+void s390_early_resume(void)
+{
+	lgr_info_log();
+	channel_subsystem_reinit();
+	zpci_rescan();
+}
@@ -281,11 +281,8 @@ restore_registers:
 	lghi	%r2,0
 	brasl	%r14,arch_set_page_states
 
-	/* Log potential guest relocation */
-	brasl	%r14,lgr_info_log
-
-	/* Reinitialize the channel subsystem */
-	brasl	%r14,channel_subsystem_reinit
+	/* Call arch specific early resume code */
+	brasl	%r14,s390_early_resume
 
 	/* Return 0 */
 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
...
@@ -92,7 +92,6 @@ void clock_comparator_work(void)
 	struct clock_event_device *cd;
 
 	S390_lowcore.clock_comparator = -1ULL;
-	set_clock_comparator(S390_lowcore.clock_comparator);
 	cd = &__get_cpu_var(comparators);
 	cd->event_handler(cd);
 }
...
@@ -63,7 +63,7 @@ static int __init vdso_setup(char *s)
 	else if (strncmp(s, "off", 4) == 0)
 		vdso_enabled = 0;
 	else {
-		rc = strict_strtoul(s, 0, &val);
+		rc = kstrtoul(s, 0, &val);
 		vdso_enabled = rc ? 0 : !!val;
 	}
 	return !rc;
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 
 	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
 		    PAGE_SIZE << SEGMENT_ORDER);
-	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) page_table, _PAGE_INVALID,
 		    256*sizeof(unsigned long));
 
 	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
-	*(unsigned long *) page_table = _PAGE_RO + page_frame;
+	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
 
 	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
 	aste = psal + 32;
...
@@ -44,7 +44,6 @@ static void __udelay_disabled(unsigned long long usecs)
 	do {
 		set_clock_comparator(end);
 		vtime_stop_cpu();
-		local_irq_disable();
 	} while (get_tod_clock() < end);
 	lockdep_on();
 	__ctl_load(cr0, 0, 0);
@@ -64,7 +63,6 @@ static void __udelay_enabled(unsigned long long usecs)
 			set_clock_comparator(end);
 		}
 		vtime_stop_cpu();
-		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
 	} while (get_tod_clock() < end);
...
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
 	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
 		table = table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x39UL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION2:
 		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x3aUL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION3:
 		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x3bUL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_SEGMENT:
 		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INV))
+		if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
 			return -0x10UL;
 		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
-			if (write && (*table & _SEGMENT_ENTRY_RO))
+			if (write && (*table & _SEGMENT_ENTRY_PROTECT))
 				return -0x04UL;
 			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
 			       (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
 	table = table + ((address >> 12) & 0xff);
 	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (*table & _PAGE_RO))
+	if (write && (*table & _PAGE_PROTECT))
 		return -0x04UL;
 	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
 	unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
 	table = table + ((address >> 20) & 0x7ff);
-	if (unlikely(*table & _SEGMENT_ENTRY_INV))
+	if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
 		return -0x10UL;
 	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
 	table = table + ((address >> 12) & 0xff);
 	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (*table & _PAGE_RO))
+	if (write && (*table & _PAGE_PROTECT))
 		return -0x04UL;
 	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
...
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
 		seq_printf(m, "I\n");
 		return;
 	}
-	seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+	seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
 	seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
 	seq_putc(m, '\n');
 }
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 }
 
 /*
- * The actual page table walker functions. In order to keep the implementation
- * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
- * flags to note_page() if a region, segment or page table entry is invalid or
- * read-only.
- * After all it's just a hint that the current level being walked contains an
- * invalid or read-only entry.
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
 */
 static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 			   pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
 		st->current_address = addr;
 		pte = pte_offset_kernel(pmd, addr);
-		prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+		prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
 		note_page(m, st, prot, 4);
 		addr += PAGE_SIZE;
 	}
 }
 
 #ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
 #else
 #define _PMD_PROT_MASK 0
 #endif
...
...@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, ...@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
pte_t *ptep, pte; pte_t *ptep, pte;
struct page *page; struct page *page;
mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL; mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do { do {
...@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, ...@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
struct page *head, *page, *tail; struct page *head, *page, *tail;
int refs; int refs;
result = write ? 0 : _SEGMENT_ENTRY_RO; result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
mask = result | _SEGMENT_ENTRY_INV; mask = result | _SEGMENT_ENTRY_INVALID;
if ((pmd_val(pmd) & mask) != result) if ((pmd_val(pmd) & mask) != result)
return 0; return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
......
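A reasoning sketch for the fast-GUP mask above (our summary; pte_allows_gup() is a hypothetical helper, not part of the patch): a pte may be used locklessly only if it is valid, not special and, for a write, not write-protected. With software referenced/dirty bits, _PAGE_PROTECT takes over the role _PAGE_RO used to play:

static inline bool pte_allows_gup(pte_t pte, int write)
{
	/* mirrors the check in gup_pte_range() */
	unsigned long mask = _PAGE_INVALID | _PAGE_SPECIAL;

	if (write)
		mask |= _PAGE_PROTECT;	/* a write needs a writable mapping */
	return (pte_val(pte) & mask) == 0;
}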
...@@ -8,21 +8,127 @@ ...@@ -8,21 +8,127 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/hugetlb.h> #include <linux/hugetlb.h>
static inline pmd_t __pte_to_pmd(pte_t pte)
{
int none, young, prot;
pmd_t pmd;
/*
* Convert encoding pte bits pmd bits
* .IR...wrdytp ..R...I...y.
* empty .10...000000 -> ..0...1...0.
* prot-none, clean, old .11...000001 -> ..0...1...1.
* prot-none, clean, young .11...000101 -> ..1...1...1.
* prot-none, dirty, old .10...001001 -> ..0...1...1.
* prot-none, dirty, young .10...001101 -> ..1...1...1.
* read-only, clean, old .11...010001 -> ..1...1...0.
* read-only, clean, young .01...010101 -> ..1...0...1.
* read-only, dirty, old .11...011001 -> ..1...1...0.
* read-only, dirty, young .01...011101 -> ..1...0...1.
* read-write, clean, old .11...110001 -> ..0...1...0.
* read-write, clean, young .01...110101 -> ..0...0...1.
* read-write, dirty, old .10...111001 -> ..0...1...0.
* read-write, dirty, young .00...111101 -> ..0...0...1.
* Huge ptes are dirty by definition, a clean pte is made dirty
* by the conversion.
*/
if (pte_present(pte)) {
pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
if (pte_val(pte) & _PAGE_INVALID)
pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
none = (pte_val(pte) & _PAGE_PRESENT) &&
!(pte_val(pte) & _PAGE_READ) &&
!(pte_val(pte) & _PAGE_WRITE);
prot = (pte_val(pte) & _PAGE_PROTECT) &&
!(pte_val(pte) & _PAGE_WRITE);
young = pte_val(pte) & _PAGE_YOUNG;
if (none || young)
pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
if (prot || (none && young))
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
} else
pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
return pmd;
}
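To make the conversion table concrete, here is one row traced through __pte_to_pmd() (a worked example, not part of the patch):

/*
 * Read-only, clean, young pte (".01...010101" in the table):
 * _PAGE_PRESENT, _PAGE_READ, _PAGE_PROTECT and _PAGE_YOUNG are set,
 * _PAGE_WRITE and _PAGE_INVALID are clear. Then:
 *   none  = PRESENT && !READ && !WRITE   -> 0
 *   prot  = PROTECT && !WRITE            -> 1
 *   young = YOUNG                        -> 1
 * none || young           -> _SEGMENT_ENTRY_YOUNG is set
 * prot || (none && young) -> _SEGMENT_ENTRY_PROTECT is set
 * _PAGE_INVALID clear     -> _SEGMENT_ENTRY_INVALID stays clear
 * Result: "..1...0...1.", the read-only, young segment encoding.
 */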
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
pte_t pte;
/*
* Convert encoding pmd bits pte bits
* ..R...I...y. .IR...wrdytp
* empty ..0...1...0. -> .10...000000
* prot-none, old ..0...1...1. -> .10...001001
* prot-none, young ..1...1...1. -> .10...001101
* read-only, old ..1...1...0. -> .11...011001
* read-only, young ..1...0...1. -> .01...011101
* read-write, old ..0...1...0. -> .10...111001
* read-write, young ..0...0...1. -> .00...111101
* Huge ptes are dirty by definition
*/
if (pmd_present(pmd)) {
pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
(pmd_val(pmd) & PAGE_MASK);
if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
pte_val(pte) |= _PAGE_INVALID;
if (pmd_prot_none(pmd)) {
if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
pte_val(pte) |= _PAGE_YOUNG;
} else {
pte_val(pte) |= _PAGE_READ;
if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
pte_val(pte) |= _PAGE_PROTECT;
else
pte_val(pte) |= _PAGE_WRITE;
if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
pte_val(pte) |= _PAGE_YOUNG;
}
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *pteptr, pte_t pteval) pte_t *ptep, pte_t pte)
{ {
pmd_t *pmdp = (pmd_t *) pteptr; pmd_t pmd;
unsigned long mask;
pmd = __pte_to_pmd(pte);
if (!MACHINE_HAS_HPAGE) { if (!MACHINE_HAS_HPAGE) {
pteptr = (pte_t *) pte_page(pteval)[1].index; pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
mask = pte_val(pteval) & pmd_val(pmd) |= pte_page(pte)[1].index;
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO); } else
pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask; pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
*(pmd_t *) ptep = pmd;
}
pte_t huge_ptep_get(pte_t *ptep)
{
unsigned long origin;
pmd_t pmd;
pmd = *(pmd_t *) ptep;
if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) |= *(unsigned long *) origin;
} }
return __pmd_to_pte(pmd);
}
pmd_val(*pmdp) = pte_val(pteval); pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pmd_t *pmdp = (pmd_t *) ptep;
pte_t pte = huge_ptep_get(ptep);
if (MACHINE_HAS_IDTE)
__pmd_idte(addr, pmdp);
else
__pmd_csp(pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
return pte;
} }
int arch_prepare_hugepage(struct page *page) int arch_prepare_hugepage(struct page *page)
...@@ -58,7 +164,7 @@ void arch_release_hugepage(struct page *page) ...@@ -58,7 +164,7 @@ void arch_release_hugepage(struct page *page)
ptep = (pte_t *) page[1].index; ptep = (pte_t *) page[1].index;
if (!ptep) if (!ptep)
return; return;
clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY, clear_table((unsigned long *) ptep, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t)); PTRS_PER_PTE * sizeof(pte_t));
page_table_free(&init_mm, (unsigned long *) ptep); page_table_free(&init_mm, (unsigned long *) ptep);
page[1].index = 0; page[1].index = 0;
......
...@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) ...@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pte = pte_offset_kernel(pmd, address); pte = pte_offset_kernel(pmd, address);
if (!enable) { if (!enable) {
__ptep_ipte(address, pte); __ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_TYPE_EMPTY; pte_val(*pte) = _PAGE_INVALID;
continue; continue;
} }
pte_val(*pte) = __pa(address); pte_val(*pte) = __pa(address);
......
...@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address) ...@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte) if (!pte)
return NULL; return NULL;
clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t)); PTRS_PER_PTE * sizeof(pte_t));
return pte; return pte;
} }
...@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) ...@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pud_val(*pu_dir) = __pa(address) | pud_val(*pu_dir) = __pa(address) |
_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
(ro ? _REGION_ENTRY_RO : 0); (ro ? _REGION_ENTRY_PROTECT : 0);
address += PUD_SIZE; address += PUD_SIZE;
continue; continue;
} }
...@@ -118,7 +118,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) ...@@ -118,7 +118,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pmd_val(*pm_dir) = __pa(address) | pmd_val(*pm_dir) = __pa(address) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
(ro ? _SEGMENT_ENTRY_RO : 0); _SEGMENT_ENTRY_YOUNG |
(ro ? _SEGMENT_ENTRY_PROTECT : 0);
address += PMD_SIZE; address += PMD_SIZE;
continue; continue;
} }
...@@ -131,7 +132,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) ...@@ -131,7 +132,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
} }
pt_dir = pte_offset_kernel(pm_dir, address); pt_dir = pte_offset_kernel(pm_dir, address);
pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0); pte_val(*pt_dir) = __pa(address) |
pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
address += PAGE_SIZE; address += PAGE_SIZE;
} }
ret = 0; ret = 0;
...@@ -154,7 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size) ...@@ -154,7 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
pte_t *pt_dir; pte_t *pt_dir;
pte_t pte; pte_t pte;
pte_val(pte) = _PAGE_TYPE_EMPTY; pte_val(pte) = _PAGE_INVALID;
while (address < end) { while (address < end) {
pg_dir = pgd_offset_k(address); pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) { if (pgd_none(*pg_dir)) {
...@@ -255,7 +257,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) ...@@ -255,7 +257,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page =__pa(vmem_alloc_pages(0)); new_page =__pa(vmem_alloc_pages(0));
if (!new_page) if (!new_page)
goto out; goto out;
pte_val(*pt_dir) = __pa(new_page); pte_val(*pt_dir) =
__pa(new_page) | pgprot_val(PAGE_KERNEL);
} }
address += PAGE_SIZE; address += PAGE_SIZE;
} }
......
...@@ -2,5 +2,5 @@ ...@@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem. # Makefile for the s390 PCI subsystem.
# #
obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \ obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_event.o pci_debug.o pci_insn.o
...@@ -36,9 +36,9 @@ static inline u8 clp_instr(void *data) ...@@ -36,9 +36,9 @@ static inline u8 clp_instr(void *data)
return cc; return cc;
} }
static void *clp_alloc_block(void) static void *clp_alloc_block(gfp_t gfp_mask)
{ {
return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
} }
static void clp_free_block(void *ptr) static void clp_free_block(void *ptr)
...@@ -70,7 +70,7 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid) ...@@ -70,7 +70,7 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
struct clp_req_rsp_query_pci_grp *rrb; struct clp_req_rsp_query_pci_grp *rrb;
int rc; int rc;
rrb = clp_alloc_block(); rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb) if (!rrb)
return -ENOMEM; return -ENOMEM;
...@@ -113,7 +113,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh) ...@@ -113,7 +113,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
struct clp_req_rsp_query_pci *rrb; struct clp_req_rsp_query_pci *rrb;
int rc; int rc;
rrb = clp_alloc_block(); rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb) if (!rrb)
return -ENOMEM; return -ENOMEM;
...@@ -179,9 +179,9 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured) ...@@ -179,9 +179,9 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{ {
struct clp_req_rsp_set_pci *rrb; struct clp_req_rsp_set_pci *rrb;
int rc, retries = 1000; int rc, retries = 100;
rrb = clp_alloc_block(); rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb) if (!rrb)
return -ENOMEM; return -ENOMEM;
...@@ -199,7 +199,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) ...@@ -199,7 +199,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
retries--; retries--;
if (retries < 0) if (retries < 0)
break; break;
msleep(1); msleep(20);
} }
} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY); } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
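The reworked retry loop also changes the total patience: 100 retries with msleep(20) bound the busy wait at roughly 2 seconds (plus CLP turnaround), compared to the previous 1000 x 1 ms, about 1 second, while issuing an order of magnitude fewer CLP requests against a busy function.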
...@@ -245,49 +245,12 @@ int clp_disable_fh(struct zpci_dev *zdev) ...@@ -245,49 +245,12 @@ int clp_disable_fh(struct zpci_dev *zdev)
return rc; return rc;
} }
static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry) static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
void (*cb)(struct clp_fh_list_entry *entry))
{ {
int present, rc;
if (!entry->vendor_id)
return;
/* TODO: be a little bit more scalable */
present = zpci_fid_present(entry->fid);
if (present)
pr_debug("%s: device %x already present\n", __func__, entry->fid);
/* skip already used functions */
if (present && entry->config_state)
return;
/* aev 306: function moved to stand-by state */
if (present && !entry->config_state) {
/*
* The handle is already disabled, that means no iota/irq freeing via
* the firmware interfaces anymore. Need to free resources manually
* (DMA memory, debug, sysfs)...
*/
zpci_stop_device(get_zdev_by_fid(entry->fid));
return;
}
rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
if (rc)
pr_err("Failed to add fid: 0x%x\n", entry->fid);
}
int clp_find_pci_devices(void)
{
struct clp_req_rsp_list_pci *rrb;
u64 resume_token = 0; u64 resume_token = 0;
int entries, i, rc; int entries, i, rc;
rrb = clp_alloc_block();
if (!rrb)
return -ENOMEM;
do { do {
memset(rrb, 0, sizeof(*rrb)); memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request); rrb->request.hdr.len = sizeof(rrb->request);
...@@ -316,12 +279,101 @@ int clp_find_pci_devices(void) ...@@ -316,12 +279,101 @@ int clp_find_pci_devices(void)
resume_token = rrb->response.resume_token; resume_token = rrb->response.resume_token;
for (i = 0; i < entries; i++) for (i = 0; i < entries; i++)
clp_check_pcifn_entry(&rrb->response.fh_list[i]); cb(&rrb->response.fh_list[i]);
} while (resume_token); } while (resume_token);
pr_debug("Maximum number of supported PCI functions: %u\n", pr_debug("Maximum number of supported PCI functions: %u\n",
rrb->response.max_fn); rrb->response.max_fn);
out: out:
return rc;
}
static void __clp_add(struct clp_fh_list_entry *entry)
{
if (!entry->vendor_id)
return;
clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
struct zpci_dev *zdev;
if (!entry->vendor_id)
return;
zdev = get_zdev_by_fid(entry->fid);
if (!zdev) {
clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
return;
}
if (!entry->config_state) {
/*
* The handle is already disabled, which means the iota/irq resources
* can no longer be freed via the firmware interfaces. Free them
* manually instead (DMA memory, debug, sysfs)...
*/
zpci_stop_device(zdev);
}
}
static void __clp_update(struct clp_fh_list_entry *entry)
{
struct zpci_dev *zdev;
if (!entry->vendor_id)
return;
zdev = get_zdev_by_fid(entry->fid);
if (!zdev)
return;
zdev->fh = entry->fh;
}
int clp_scan_pci_devices(void)
{
struct clp_req_rsp_list_pci *rrb;
int rc;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
rc = clp_list_pci(rrb, __clp_add);
clp_free_block(rrb);
return rc;
}
int clp_rescan_pci_devices(void)
{
struct clp_req_rsp_list_pci *rrb;
int rc;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
rc = clp_list_pci(rrb, __clp_rescan);
clp_free_block(rrb);
return rc;
}
int clp_rescan_pci_devices_simple(void)
{
struct clp_req_rsp_list_pci *rrb;
int rc;
rrb = clp_alloc_block(GFP_NOWAIT);
if (!rrb)
return -ENOMEM;
rc = clp_list_pci(rrb, __clp_update);
clp_free_block(rrb); clp_free_block(rrb);
return rc; return rc;
} }
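The refactored clp_list_pci() separates the resume-token iteration from the per-entry policy, so a new traversal only needs to supply a callback. A hypothetical sketch (the counter and function name are ours, not part of the patch):

static unsigned int num_fns;	/* illustration only */

static void __clp_count(struct clp_fh_list_entry *entry)
{
	if (entry->vendor_id)
		num_fns++;
}

	/* ... and then: rc = clp_list_pci(rrb, __clp_count); */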
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/iommu-helper.h> #include <linux/iommu-helper.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <asm/pci_dma.h> #include <asm/pci_dma.h>
...@@ -170,8 +171,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, ...@@ -170,8 +171,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
*/ */
goto no_refresh; goto no_refresh;
rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
nr_pages * PAGE_SIZE); nr_pages * PAGE_SIZE);
no_refresh: no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
...@@ -407,7 +408,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, ...@@ -407,7 +408,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int zpci_dma_init_device(struct zpci_dev *zdev) int zpci_dma_init_device(struct zpci_dev *zdev)
{ {
unsigned int bitmap_order;
int rc; int rc;
spin_lock_init(&zdev->iommu_bitmap_lock); spin_lock_init(&zdev->iommu_bitmap_lock);
...@@ -421,12 +421,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) ...@@ -421,12 +421,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
bitmap_order = get_order(zdev->iommu_pages / 8); zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
zdev->iommu_size, zdev->iommu_pages, bitmap_order);
zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
bitmap_order);
if (!zdev->iommu_bitmap) { if (!zdev->iommu_bitmap) {
rc = -ENOMEM; rc = -ENOMEM;
goto out_reg; goto out_reg;
...@@ -451,8 +446,7 @@ void zpci_dma_exit_device(struct zpci_dev *zdev) ...@@ -451,8 +446,7 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
{ {
zpci_unregister_ioat(zdev, 0); zpci_unregister_ioat(zdev, 0);
dma_cleanup_tables(zdev); dma_cleanup_tables(zdev);
free_pages((unsigned long) zdev->iommu_bitmap, vfree(zdev->iommu_bitmap);
get_order(zdev->iommu_pages / 8));
zdev->iommu_bitmap = NULL; zdev->iommu_bitmap = NULL;
zdev->next_bit = 0; zdev->next_bit = 0;
} }
......
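Why vzalloc() instead of __get_free_pages() matters here: the IOMMU bitmap needs one bit per IOMMU page, so it grows linearly with memory size. A back-of-the-envelope calculation, assuming 4 KiB pages:

/*
 *   64 GiB of memory -> 64 GiB / 4 KiB = 16M IOMMU pages
 *   bitmap size      = 16M bits / 8    = 2 MiB
 *
 * 2 MiB is an order-9 physically contiguous allocation, which is
 * unreliable on a fragmented system; vzalloc() only needs virtually
 * contiguous memory, so the allocation keeps working as memory grows.
 */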
...@@ -69,7 +69,7 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf) ...@@ -69,7 +69,7 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
clp_add_pci_device(ccdf->fid, ccdf->fh, 0); clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
break; break;
case 0x0306: case 0x0306:
clp_find_pci_devices(); clp_rescan_pci_devices();
break; break;
default: default:
break; break;
......
...@@ -27,7 +27,7 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) ...@@ -27,7 +27,7 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
return cc; return cc;
} }
int s390pci_mod_fc(u64 req, struct zpci_fib *fib) int zpci_mod_fc(u64 req, struct zpci_fib *fib)
{ {
u8 cc, status; u8 cc, status;
...@@ -61,7 +61,7 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) ...@@ -61,7 +61,7 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
return cc; return cc;
} }
int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{ {
u8 cc, status; u8 cc, status;
...@@ -78,7 +78,7 @@ int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) ...@@ -78,7 +78,7 @@ int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
} }
/* Set Interruption Controls */ /* Set Interruption Controls */
void set_irq_ctrl(u16 ctl, char *unused, u8 isc) void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{ {
asm volatile ( asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
...@@ -109,7 +109,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) ...@@ -109,7 +109,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
return cc; return cc;
} }
int s390pci_load(u64 *data, u64 req, u64 offset) int zpci_load(u64 *data, u64 req, u64 offset)
{ {
u8 status; u8 status;
int cc; int cc;
...@@ -125,7 +125,7 @@ int s390pci_load(u64 *data, u64 req, u64 offset) ...@@ -125,7 +125,7 @@ int s390pci_load(u64 *data, u64 req, u64 offset)
__func__, cc, status, req, offset); __func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc; return (cc > 0) ? -EIO : cc;
} }
EXPORT_SYMBOL_GPL(s390pci_load); EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */ /* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
...@@ -147,7 +147,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) ...@@ -147,7 +147,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
return cc; return cc;
} }
int s390pci_store(u64 data, u64 req, u64 offset) int zpci_store(u64 data, u64 req, u64 offset)
{ {
u8 status; u8 status;
int cc; int cc;
...@@ -163,7 +163,7 @@ int s390pci_store(u64 data, u64 req, u64 offset) ...@@ -163,7 +163,7 @@ int s390pci_store(u64 data, u64 req, u64 offset)
__func__, cc, status, req, offset); __func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc; return (cc > 0) ? -EIO : cc;
} }
EXPORT_SYMBOL_GPL(s390pci_store); EXPORT_SYMBOL_GPL(zpci_store);
/* PCI Store Block */ /* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
...@@ -183,7 +183,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) ...@@ -183,7 +183,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
return cc; return cc;
} }
int s390pci_store_block(const u64 *data, u64 req, u64 offset) int zpci_store_block(const u64 *data, u64 req, u64 offset)
{ {
u8 status; u8 status;
int cc; int cc;
...@@ -199,4 +199,4 @@ int s390pci_store_block(const u64 *data, u64 req, u64 offset) ...@@ -199,4 +199,4 @@ int s390pci_store_block(const u64 *data, u64 req, u64 offset)
__func__, cc, status, req, offset); __func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc; return (cc > 0) ? -EIO : cc;
} }
EXPORT_SYMBOL_GPL(s390pci_store_block); EXPORT_SYMBOL_GPL(zpci_store_block);
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <asm/hw_irq.h>
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
static const unsigned int msi_hash_bits = 8;
#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
static DEFINE_SPINLOCK(msi_map_lock);
struct msi_desc *__irq_get_msi_desc(unsigned int irq)
{
struct msi_map *map;
hlist_for_each_entry_rcu(map,
&msi_hash[msi_hashfn(irq)], msi_chain)
if (map->irq == irq)
return map->msi;
return NULL;
}
int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
if (msi->msi_attrib.is_msix) {
int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
msi->masked = readl(msi->mask_base + offset);
writel(flag, msi->mask_base + offset);
} else {
if (msi->msi_attrib.maskbit) {
int pos;
u32 mask_bits;
pos = (long) msi->mask_base;
pci_read_config_dword(msi->dev, pos, &mask_bits);
mask_bits &= ~(mask);
mask_bits |= flag & mask;
pci_write_config_dword(msi->dev, pos, mask_bits);
} else {
return 0;
}
}
msi->msi_attrib.maskbit = !!flag;
return 1;
}
int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
unsigned int nr, int offset)
{
struct msi_map *map;
struct msi_msg msg;
int rc;
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
map->irq = nr;
map->msi = msi;
zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
INIT_HLIST_NODE(&map->msi_chain);
pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
__func__, nr, msi_hashfn(nr));
hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
spin_lock(&msi_map_lock);
rc = irq_set_msi_desc(nr, msi);
if (rc) {
spin_unlock(&msi_map_lock);
hlist_del_rcu(&map->msi_chain);
kfree(map);
zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
return rc;
}
spin_unlock(&msi_map_lock);
msg.data = nr - offset;
msg.address_lo = zdev->msi_addr & 0xffffffff;
msg.address_hi = zdev->msi_addr >> 32;
write_msi_msg(nr, &msg);
return 0;
}
void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
{
int irq = msi->irq & ZPCI_MSI_MASK;
struct msi_map *map;
msi->msg.address_lo = 0;
msi->msg.address_hi = 0;
msi->msg.data = 0;
msi->irq = 0;
zpci_msi_set_mask_bits(msi, 1, 1);
spin_lock(&msi_map_lock);
map = zdev->msi_map[irq];
hlist_del_rcu(&map->msi_chain);
kfree(map);
zdev->msi_map[irq] = NULL;
spin_unlock(&msi_map_lock);
}
/*
* The msi hash table has 256 entries which is good for 4..20
* devices (a typical device allocates 10 + CPUs MSIs). Maybe make
* the hash table size adjustable later.
*/
int __init zpci_msihash_init(void)
{
unsigned int i;
msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
if (!msi_hash)
return -ENOMEM;
for (i = 0; i < MSI_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&msi_hash[i]);
return 0;
}
void __init zpci_msihash_exit(void)
{
kfree(msi_hash);
}
...@@ -48,11 +48,38 @@ static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr, ...@@ -48,11 +48,38 @@ static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
} }
static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL); static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
static void recover_callback(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct zpci_dev *zdev = get_zdev(pdev);
int ret;
pci_stop_and_remove_bus_device(pdev);
ret = zpci_disable_device(zdev);
if (ret)
return;
ret = zpci_enable_device(zdev);
if (ret)
return;
pci_rescan_bus(zdev->bus);
}
static ssize_t store_recover(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int rc = device_schedule_callback(dev, recover_callback);
return rc ? rc : count;
}
static DEVICE_ATTR(recover, S_IWUSR, NULL, store_recover);
static struct device_attribute *zpci_dev_attrs[] = { static struct device_attribute *zpci_dev_attrs[] = {
&dev_attr_function_id, &dev_attr_function_id,
&dev_attr_function_handle, &dev_attr_function_handle,
&dev_attr_pchid, &dev_attr_pchid,
&dev_attr_pfgid, &dev_attr_pfgid,
&dev_attr_recover,
NULL, NULL,
}; };
......
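Usage note for the new knob (the device address below is a placeholder): writing to the attribute, e.g. echo 1 > /sys/bus/pci/devices/<dev>/recover, schedules recover_callback(), which removes the PCI device, cycles zpci_disable_device()/zpci_enable_device() and rescans the bus. device_schedule_callback() is used because a sysfs store method cannot safely remove its own device directly.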
...@@ -146,7 +146,7 @@ config HOTPLUG_PCI_SGI ...@@ -146,7 +146,7 @@ config HOTPLUG_PCI_SGI
When in doubt, say N. When in doubt, say N.
config HOTPLUG_PCI_S390 config HOTPLUG_PCI_S390
tristate "System z PCI Hotplug Support" bool "System z PCI Hotplug Support"
depends on S390 && 64BIT depends on S390 && 64BIT
help help
Say Y here if you want to use the System z PCI Hotplug Say Y here if you want to use the System z PCI Hotplug
......
...@@ -79,8 +79,6 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) ...@@ -79,8 +79,6 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
if (rc) if (rc)
goto out_deconfigure; goto out_deconfigure;
slot->zdev->state = ZPCI_FN_STATE_ONLINE;
pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN); pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
pci_bus_add_devices(slot->zdev->bus); pci_bus_add_devices(slot->zdev->bus);
...@@ -148,7 +146,7 @@ static struct hotplug_slot_ops s390_hotplug_slot_ops = { ...@@ -148,7 +146,7 @@ static struct hotplug_slot_ops s390_hotplug_slot_ops = {
.get_adapter_status = get_adapter_status, .get_adapter_status = get_adapter_status,
}; };
static int init_pci_slot(struct zpci_dev *zdev) int zpci_init_slot(struct zpci_dev *zdev)
{ {
struct hotplug_slot *hotplug_slot; struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info; struct hotplug_slot_info *info;
...@@ -202,7 +200,7 @@ static int init_pci_slot(struct zpci_dev *zdev) ...@@ -202,7 +200,7 @@ static int init_pci_slot(struct zpci_dev *zdev)
return -ENOMEM; return -ENOMEM;
} }
static void exit_pci_slot(struct zpci_dev *zdev) void zpci_exit_slot(struct zpci_dev *zdev)
{ {
struct list_head *tmp, *n; struct list_head *tmp, *n;
struct slot *slot; struct slot *slot;
...@@ -215,60 +213,3 @@ static void exit_pci_slot(struct zpci_dev *zdev) ...@@ -215,60 +213,3 @@ static void exit_pci_slot(struct zpci_dev *zdev)
pci_hp_deregister(slot->hotplug_slot); pci_hp_deregister(slot->hotplug_slot);
} }
} }
static struct pci_hp_callback_ops hp_ops = {
.create_slot = init_pci_slot,
.remove_slot = exit_pci_slot,
};
static void __init init_pci_slots(void)
{
struct zpci_dev *zdev;
/*
* Create a structure for each slot, and register that slot
* with the pci_hotplug subsystem.
*/
mutex_lock(&zpci_list_lock);
list_for_each_entry(zdev, &zpci_list, entry) {
init_pci_slot(zdev);
}
mutex_unlock(&zpci_list_lock);
}
static void __exit exit_pci_slots(void)
{
struct list_head *tmp, *n;
struct slot *slot;
/*
* Unregister all of our slots with the pci_hotplug subsystem.
* Memory will be freed in release_slot() callback after slot's
* lifespan is finished.
*/
list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
}
}
static int __init pci_hotplug_s390_init(void)
{
if (!s390_pci_probe)
return -EOPNOTSUPP;
zpci_register_hp_ops(&hp_ops);
init_pci_slots();
return 0;
}
static void __exit pci_hotplug_s390_exit(void)
{
exit_pci_slots();
zpci_deregister_hp_ops();
}
module_init(pci_hotplug_s390_init);
module_exit(pci_hotplug_s390_exit);
...@@ -930,7 +930,7 @@ dasd_use_raw_store(struct device *dev, struct device_attribute *attr, ...@@ -930,7 +930,7 @@ dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(devmap)) if (IS_ERR(devmap))
return PTR_ERR(devmap); return PTR_ERR(devmap);
if ((strict_strtoul(buf, 10, &val) != 0) || val > 1) if ((kstrtoul(buf, 10, &val) != 0) || val > 1)
return -EINVAL; return -EINVAL;
spin_lock(&dasd_devmap_lock); spin_lock(&dasd_devmap_lock);
...@@ -1225,7 +1225,7 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr, ...@@ -1225,7 +1225,7 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device)) if (IS_ERR(device))
return -ENODEV; return -ENODEV;
if ((strict_strtoul(buf, 10, &val) != 0) || if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_EXPIRES_MAX) || val == 0) { (val > DASD_EXPIRES_MAX) || val == 0) {
dasd_put_device(device); dasd_put_device(device);
return -EINVAL; return -EINVAL;
...@@ -1265,7 +1265,7 @@ dasd_retries_store(struct device *dev, struct device_attribute *attr, ...@@ -1265,7 +1265,7 @@ dasd_retries_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device)) if (IS_ERR(device))
return -ENODEV; return -ENODEV;
if ((strict_strtoul(buf, 10, &val) != 0) || if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_RETRIES_MAX)) { (val > DASD_RETRIES_MAX)) {
dasd_put_device(device); dasd_put_device(device);
return -EINVAL; return -EINVAL;
...@@ -1307,7 +1307,7 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr, ...@@ -1307,7 +1307,7 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device) || !device->block) if (IS_ERR(device) || !device->block)
return -ENODEV; return -ENODEV;
if ((strict_strtoul(buf, 10, &val) != 0) || if ((kstrtoul(buf, 10, &val) != 0) ||
val > UINT_MAX / HZ) { val > UINT_MAX / HZ) {
dasd_put_device(device); dasd_put_device(device);
return -EINVAL; return -EINVAL;
......
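strict_strtoul() is converted to kstrtoul(), its current name; the calling convention is the same (0 on success, negative errno on failure), so the stores keep their shape. A minimal sketch of the pattern, modeled on the use_raw store above:

	unsigned long val;

	if (kstrtoul(buf, 10, &val) || val > 1)
		return -EINVAL;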
...@@ -85,6 +85,8 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids); ...@@ -85,6 +85,8 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */ static struct ccw_driver dasd_eckd_driver; /* see below */
static void *rawpadpage;
#define INIT_CQR_OK 0 #define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1 #define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2 #define INIT_CQR_ERROR 2
...@@ -3237,18 +3239,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, ...@@ -3237,18 +3239,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
unsigned int seg_len, len_to_track_end; unsigned int seg_len, len_to_track_end;
unsigned int first_offs; unsigned int first_offs;
unsigned int cidaw, cplength, datasize; unsigned int cidaw, cplength, datasize;
sector_t first_trk, last_trk; sector_t first_trk, last_trk, sectors;
sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
unsigned int pfx_datasize; unsigned int pfx_datasize;
/* /*
* raw track access needs to be a multiple of 64k and on a 64k boundary * raw track access needs to be a multiple of 64k and on a 64k boundary
* For read requests we can fix an incorrect alignment by padding
* the request with dummy pages.
*/ */
if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) { start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
cqr = ERR_PTR(-EINVAL); end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
goto out; DASD_RAW_SECTORS_PER_TRACK;
} end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
if (((blk_rq_pos(req) + blk_rq_sectors(req)) % DASD_RAW_SECTORS_PER_TRACK;
DASD_RAW_SECTORS_PER_TRACK) != 0) { basedev = block->base;
if ((start_padding_sectors || end_padding_sectors) &&
(rq_data_dir(req) == WRITE)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"raw write not track aligned (%lu,%lu) req %p",
start_padding_sectors, end_padding_sectors, req);
cqr = ERR_PTR(-EINVAL); cqr = ERR_PTR(-EINVAL);
goto out; goto out;
} }
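A worked example of the new alignment handling, assuming 64 KiB per track and 512-byte sectors, i.e. DASD_RAW_SECTORS_PER_TRACK = 65536 / 512 = 128:

/*
 *   blk_rq_pos(req)       = 130    blk_rq_sectors(req) = 250
 *   start_padding_sectors = 130 % 128          = 2
 *   end_sector_offset     = (130 + 250) % 128  = 124
 *   end_padding_sectors   = (128 - 124) % 128  = 4
 *
 * A read gets 2 dummy sectors in front and 4 at the end, all backed
 * by rawpadpage; a misaligned write is still rejected with -EINVAL.
 */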
...@@ -3258,7 +3268,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, ...@@ -3258,7 +3268,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
DASD_RAW_SECTORS_PER_TRACK; DASD_RAW_SECTORS_PER_TRACK;
trkcount = last_trk - first_trk + 1; trkcount = last_trk - first_trk + 1;
first_offs = 0; first_offs = 0;
basedev = block->base;
if (rq_data_dir(req) == READ) if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK; cmd = DASD_ECKD_CCW_READ_TRACK;
...@@ -3307,12 +3316,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, ...@@ -3307,12 +3316,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
} }
idaws = (unsigned long *)(cqr->data + pfx_datasize); idaws = (unsigned long *)(cqr->data + pfx_datasize);
len_to_track_end = 0; len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
/* maximum 3390 track size */
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536 - start_padding_sectors * 512;
ccw->cda = (__u32)(addr_t)idaws;
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
}
rq_for_each_segment(bv, req, iter) { rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset; dst = page_address(bv->bv_page) + bv->bv_offset;
seg_len = bv->bv_len; seg_len = bv->bv_len;
if (cmd == DASD_ECKD_CCW_READ_TRACK)
memset(dst, 0, seg_len);
if (!len_to_track_end) { if (!len_to_track_end) {
ccw[-1].flags |= CCW_FLAG_CC; ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd; ccw->cmd_code = cmd;
...@@ -3328,7 +3351,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, ...@@ -3328,7 +3351,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
len_to_track_end -= seg_len; len_to_track_end -= seg_len;
idaws = idal_create_words(idaws, dst, seg_len); idaws = idal_create_words(idaws, dst, seg_len);
} }
for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
if (blk_noretry_request(req) || if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST) block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
...@@ -4479,12 +4503,19 @@ dasd_eckd_init(void) ...@@ -4479,12 +4503,19 @@ dasd_eckd_init(void)
kfree(dasd_reserve_req); kfree(dasd_reserve_req);
return -ENOMEM; return -ENOMEM;
} }
rawpadpage = (void *)__get_free_page(GFP_KERNEL);
if (!rawpadpage) {
kfree(path_verification_worker);
kfree(dasd_reserve_req);
return -ENOMEM;
}
ret = ccw_driver_register(&dasd_eckd_driver); ret = ccw_driver_register(&dasd_eckd_driver);
if (!ret) if (!ret)
wait_for_device_probe(); wait_for_device_probe();
else { else {
kfree(path_verification_worker); kfree(path_verification_worker);
kfree(dasd_reserve_req); kfree(dasd_reserve_req);
free_page((unsigned long)rawpadpage);
} }
return ret; return ret;
} }
...@@ -4495,6 +4526,7 @@ dasd_eckd_cleanup(void) ...@@ -4495,6 +4526,7 @@ dasd_eckd_cleanup(void)
ccw_driver_unregister(&dasd_eckd_driver); ccw_driver_unregister(&dasd_eckd_driver);
kfree(path_verification_worker); kfree(path_verification_worker);
kfree(dasd_reserve_req); kfree(dasd_reserve_req);
free_page((unsigned long)rawpadpage);
} }
module_init(dasd_eckd_init); module_init(dasd_eckd_init);
......
...@@ -124,10 +124,15 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) ...@@ -124,10 +124,15 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr) struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{ {
int success; int success;
unsigned long long startclk, stopclk;
struct dasd_device *startdev;
BUG_ON(cqr->refers == NULL || cqr->function == NULL); BUG_ON(cqr->refers == NULL || cqr->function == NULL);
success = cqr->status == DASD_CQR_DONE; success = cqr->status == DASD_CQR_DONE;
startclk = cqr->startclk;
stopclk = cqr->stopclk;
startdev = cqr->startdev;
/* free all ERPs - but NOT the original cqr */ /* free all ERPs - but NOT the original cqr */
while (cqr->refers != NULL) { while (cqr->refers != NULL) {
...@@ -142,6 +147,9 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr) ...@@ -142,6 +147,9 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
} }
/* set corresponding status to original cqr */ /* set corresponding status to original cqr */
cqr->startclk = startclk;
cqr->stopclk = stopclk;
cqr->startdev = startdev;
if (success) if (success)
cqr->status = DASD_CQR_DONE; cqr->status = DASD_CQR_DONE;
else { else {
...@@ -160,11 +168,13 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) ...@@ -160,11 +168,13 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev; device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) { if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev, "cqr %p timeout error", cqr); dev_err(&device->cdev->dev,
"A timeout error occurred for cqr %p", cqr);
return; return;
} }
if (cqr->intrc == -ENOLINK) { if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev, "cqr %p transport error", cqr); dev_err(&device->cdev->dev,
"A transport error occurred for cqr %p", cqr);
return; return;
} }
/* dump sense data */ /* dump sense data */
......
...@@ -32,7 +32,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) ...@@ -32,7 +32,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
struct device *dev; struct device *dev;
s390_adjust_jiffies(); s390_adjust_jiffies();
pr_warning("cpu capability changed.\n"); pr_info("CPU capability may have changed\n");
get_online_cpus(); get_online_cpus();
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu); dev = get_cpu_device(cpu);
......
...@@ -81,15 +81,185 @@ void unregister_adapter_interrupt(struct airq_struct *airq) ...@@ -81,15 +81,185 @@ void unregister_adapter_interrupt(struct airq_struct *airq)
} }
EXPORT_SYMBOL(unregister_adapter_interrupt); EXPORT_SYMBOL(unregister_adapter_interrupt);
void do_adapter_IO(u8 isc) static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{ {
struct tpi_info *tpi_info;
struct airq_struct *airq; struct airq_struct *airq;
struct hlist_head *head; struct hlist_head *head;
head = &airq_lists[isc]; __this_cpu_write(s390_idle.nohz_delay, 1);
tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
head = &airq_lists[tpi_info->isc];
rcu_read_lock(); rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list) hlist_for_each_entry_rcu(airq, head, list)
if ((*airq->lsi_ptr & airq->lsi_mask) != 0) if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
airq->handler(airq); airq->handler(airq);
rcu_read_unlock(); rcu_read_unlock();
return IRQ_HANDLED;
}
static struct irqaction airq_interrupt = {
.name = "AIO",
.handler = do_airq_interrupt,
};
void __init init_airq_interrupts(void)
{
irq_set_chip_and_handler(THIN_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
/**
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
* @flags: allocation flags
*
* Returns a pointer to an interrupt vector structure, or NULL if the allocation failed
*/
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
{
struct airq_iv *iv;
unsigned long size;
iv = kzalloc(sizeof(*iv), GFP_KERNEL);
if (!iv)
goto out;
iv->bits = bits;
size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
iv->vector = kzalloc(size, GFP_KERNEL);
if (!iv->vector)
goto out_free;
if (flags & AIRQ_IV_ALLOC) {
iv->avail = kmalloc(size, GFP_KERNEL);
if (!iv->avail)
goto out_free;
memset(iv->avail, 0xff, size);
iv->end = 0;
} else
iv->end = bits;
if (flags & AIRQ_IV_BITLOCK) {
iv->bitlock = kzalloc(size, GFP_KERNEL);
if (!iv->bitlock)
goto out_free;
}
if (flags & AIRQ_IV_PTR) {
size = bits * sizeof(unsigned long);
iv->ptr = kzalloc(size, GFP_KERNEL);
if (!iv->ptr)
goto out_free;
}
if (flags & AIRQ_IV_DATA) {
size = bits * sizeof(unsigned int);
iv->data = kzalloc(size, GFP_KERNEL);
if (!iv->data)
goto out_free;
}
spin_lock_init(&iv->lock);
return iv;
out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
kfree(iv->vector);
kfree(iv);
out:
return NULL;
}
EXPORT_SYMBOL(airq_iv_create);
/**
* airq_iv_release - release an interrupt vector
* @iv: pointer to interrupt vector structure
*/
void airq_iv_release(struct airq_iv *iv)
{
kfree(iv->data);
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->vector);
kfree(iv->avail);
kfree(iv);
}
EXPORT_SYMBOL(airq_iv_release);
/**
* airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
* @iv: pointer to an interrupt vector structure
*
* Returns the bit number of the allocated irq, or -1UL if no bit
* is available or the AIRQ_IV_ALLOC flag has not been specified
*/
unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
{
const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
if (!iv->avail)
return -1UL;
spin_lock(&iv->lock);
bit = find_first_bit_left(iv->avail, iv->bits);
if (bit < iv->bits) {
clear_bit(bit ^ be_to_le, iv->avail);
if (bit >= iv->end)
iv->end = bit + 1;
} else
bit = -1UL;
spin_unlock(&iv->lock);
return bit;
}
EXPORT_SYMBOL(airq_iv_alloc_bit);
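A note on the bit ^ be_to_le conversion (our summary, consistent with the code): find_first_bit_left() numbers bits big-endian, bit 0 being the MSB of the first word, while set_bit()/clear_bit() use little-endian bit numbers. XOR-ing with BITS_PER_LONG - 1 converts between the two:

/*
 * With 64-bit longs, le_bit = be_bit ^ 63: flipping the six
 * low-order bits computes 63 - (bit % 64) while leaving the word
 * index (bit / 64) untouched.
 */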
/**
* airq_iv_free_bit - free an irq bit of an interrupt vector
* @iv: pointer to interrupt vector structure
* @bit: number of the irq bit to free
*/
void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
{
const unsigned long be_to_le = BITS_PER_LONG - 1;
if (!iv->avail)
return;
spin_lock(&iv->lock);
/* Clear (possibly left over) interrupt bit */
clear_bit(bit ^ be_to_le, iv->vector);
/* Make the bit position available again */
set_bit(bit ^ be_to_le, iv->avail);
if (bit == iv->end - 1) {
/* Find new end of bit-field */
while (--iv->end > 0)
if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
break;
}
spin_unlock(&iv->lock);
}
EXPORT_SYMBOL(airq_iv_free_bit);
/**
* airq_iv_scan - scan interrupt vector for non-zero bits
* @iv: pointer to interrupt vector structure
* @start: bit number to start the search
* @end: bit number to end the search
*
* Returns the bit number of the next non-zero interrupt bit, or
* -1UL if the scan completed without finding any more non-zero bits.
*/
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end)
{
const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
/* Find the next non-zero bit, starting from 'start'. */
bit = find_next_bit_left(iv->vector, end, start);
if (bit >= end)
return -1UL;
/* Clear interrupt bit (find left uses big-endian bit numbers) */
clear_bit(bit ^ be_to_le, iv->vector);
return bit;
} }
EXPORT_SYMBOL(airq_iv_scan);
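A minimal usage sketch of the interrupt vector API (hypothetical driver code; handle_bit() is a stand-in, and airq_iv_end() is assumed to return iv->end as in the accompanying header):

	struct airq_iv *iv;
	unsigned long bit, n;

	iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_BITLOCK);
	if (!iv)
		return -ENOMEM;
	bit = airq_iv_alloc_bit(iv);	/* reserve one irq bit, -1UL on failure */

	/* later, in the thin-interrupt handler: drain all pending bits */
	for (n = airq_iv_scan(iv, 0, airq_iv_end(iv)); n != -1UL;
	     n = airq_iv_scan(iv, n + 1, airq_iv_end(iv)))
		handle_bit(n);

	/* teardown */
	airq_iv_free_bit(iv, bit);
	airq_iv_release(iv);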
...@@ -137,7 +137,7 @@ static ssize_t ccwgroup_online_store(struct device *dev, ...@@ -137,7 +137,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
if (!try_module_get(gdrv->driver.owner)) if (!try_module_get(gdrv->driver.owner))
return -EINVAL; return -EINVAL;
ret = strict_strtoul(buf, 0, &value); ret = kstrtoul(buf, 0, &value);
if (ret) if (ret)
goto out; goto out;
......
...@@ -561,37 +561,23 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) ...@@ -561,37 +561,23 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
} }
/* /*
* do_IRQ() handles all normal I/O device IRQs (the special * do_cio_interrupt() handles all normal I/O device IRQs
* SMP cross-CPU interrupts have their own specific
* handlers).
*
*/ */
void __irq_entry do_IRQ(struct pt_regs *regs) static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{ {
struct tpi_info *tpi_info = (struct tpi_info *) &regs->int_code; struct tpi_info *tpi_info;
struct subchannel *sch; struct subchannel *sch;
struct irb *irb; struct irb *irb;
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
irq_enter();
__this_cpu_write(s390_idle.nohz_delay, 1); __this_cpu_write(s390_idle.nohz_delay, 1);
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
/* Serve timer interrupts first. */
clock_comparator_work();
kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
irb = (struct irb *) &S390_lowcore.irb; irb = (struct irb *) &S390_lowcore.irb;
if (tpi_info->adapter_IO) {
do_adapter_IO(tpi_info->isc);
goto out;
}
sch = (struct subchannel *)(unsigned long) tpi_info->intparm; sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
if (!sch) { if (!sch) {
/* Clear pending interrupt condition. */ /* Clear pending interrupt condition. */
inc_irq_stat(IRQIO_CIO); inc_irq_stat(IRQIO_CIO);
tsch(tpi_info->schid, irb); tsch(tpi_info->schid, irb);
goto out; return IRQ_HANDLED;
} }
spin_lock(sch->lock); spin_lock(sch->lock);
/* Store interrupt response block to lowcore. */ /* Store interrupt response block to lowcore. */
...@@ -606,9 +592,23 @@ void __irq_entry do_IRQ(struct pt_regs *regs) ...@@ -606,9 +592,23 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
} else } else
inc_irq_stat(IRQIO_CIO); inc_irq_stat(IRQIO_CIO);
spin_unlock(sch->lock); spin_unlock(sch->lock);
out:
irq_exit(); return IRQ_HANDLED;
set_irq_regs(old_regs); }
static struct irq_desc *irq_desc_io;
static struct irqaction io_interrupt = {
.name = "IO",
.handler = do_cio_interrupt,
};
void __init init_cio_interrupts(void)
{
irq_set_chip_and_handler(IO_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
setup_irq(IO_INTERRUPT, &io_interrupt);
irq_desc_io = irq_to_desc(IO_INTERRUPT);
} }
#ifdef CONFIG_CCW_CONSOLE #ifdef CONFIG_CCW_CONSOLE
...@@ -635,7 +635,7 @@ void cio_tsch(struct subchannel *sch) ...@@ -635,7 +635,7 @@ void cio_tsch(struct subchannel *sch)
local_bh_disable(); local_bh_disable();
irq_enter(); irq_enter();
} }
kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL); kstat_incr_irqs_this_cpu(IO_INTERRUPT, irq_desc_io);
if (sch->driver && sch->driver->irq) if (sch->driver && sch->driver->irq)
sch->driver->irq(sch); sch->driver->irq(sch);
else else
......
...@@ -121,9 +121,6 @@ extern int cio_commit_config(struct subchannel *sch); ...@@ -121,9 +121,6 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch); int cio_tm_intrg(struct subchannel *sch);
void do_adapter_IO(u8 isc);
void do_IRQ(struct pt_regs *);
/* Use with care. */ /* Use with care. */
#ifdef CONFIG_CCW_CONSOLE #ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void); extern struct subchannel *cio_probe_console(void);
......
...@@ -1182,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev, ...@@ -1182,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev,
int ret; int ret;
unsigned long val; unsigned long val;
ret = strict_strtoul(buf, 16, &val); ret = kstrtoul(buf, 16, &val);
if (ret) if (ret)
return ret; return ret;
......
...@@ -546,7 +546,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) ...@@ -546,7 +546,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
case -ENOMEM: case -ENOMEM:
case -EIO: case -EIO:
/* These should abort looping */ /* These should abort looping */
spin_lock_irq(&slow_subchannel_lock);
idset_sch_del_subseq(slow_subchannel_set, schid); idset_sch_del_subseq(slow_subchannel_set, schid);
spin_unlock_irq(&slow_subchannel_lock);
break; break;
default: default:
rc = 0; rc = 0;
...@@ -740,7 +742,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr, ...@@ -740,7 +742,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
int ret; int ret;
unsigned long val; unsigned long val;
ret = strict_strtoul(buf, 16, &val); ret = kstrtoul(buf, 16, &val);
if (ret) if (ret)
return ret; return ret;
mutex_lock(&css->mutex); mutex_lock(&css->mutex);
......
...@@ -130,8 +130,6 @@ struct channel_subsystem { ...@@ -130,8 +130,6 @@ struct channel_subsystem {
extern struct channel_subsystem *channel_subsystems[]; extern struct channel_subsystem *channel_subsystems[];
void channel_subsystem_reinit(void);
/* Helper functions to build lists for the slow path. */ /* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void); void css_schedule_eval_all(void);
......
...@@ -564,7 +564,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, ...@@ -564,7 +564,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
ret = 0; ret = 0;
} else { } else {
force = 0; force = 0;
ret = strict_strtoul(buf, 16, &i); ret = kstrtoul(buf, 16, &i);
} }
if (ret) if (ret)
goto out; goto out;
......
...@@ -208,7 +208,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, ...@@ -208,7 +208,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
goto out; goto out;
} }
rc = strict_strtoul(buf, 16, &i); rc = kstrtoul(buf, 16, &i);
if (rc) { if (rc) {
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
......
...@@ -208,10 +208,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) ...@@ -208,10 +208,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif #endif
#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(pfn) (0)
#endif
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) #define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif #endif
......
...@@ -873,9 +873,6 @@ int page_referenced(struct page *page, ...@@ -873,9 +873,6 @@ int page_referenced(struct page *page,
vm_flags); vm_flags);
if (we_locked) if (we_locked)
unlock_page(page); unlock_page(page);
if (page_test_and_clear_young(page_to_pfn(page)))
referenced++;
} }
out: out:
return referenced; return referenced;
......