Commit cbe619b1 authored by Linus Torvalds

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6: (23 commits)
  [SPARC64]: virt_to_real_irq_table --> virt_irq_table
  [SPARC64]: virt_irq --> bucket mapping no longer necessary
  [SPARC64]: Kill ugly __bucket() macro.
  [SPARC64]: Kill ugly __irq_ino() macro.
  [SPARC64]: Only use bypass accesses to INO buckets.
  [SPARC64]: Update defconfig.
  [SPARC64]: Use sun4v VIRQ interfaces as intended.
  [SPARC64]: Allocate ivector_table dynamically.
  [SPARC64]: Access ivector_table[] using physical addresses.
  [SPARC64]: Make IVEC pointers 64-bit.
  [SPARC64]: Fix register usage in xor_raid_4().
  [SPARC64]: Kill pci_memspace_mask.
  [SPARC64]: Consolidate MSI support code.
  [SPARC/64]: Move of_platform_driver initialisations: arch/sparc{,64}.
  [SPARC64]: Fix bugs in SYSV IPC handling in 64-bit processes.
  [SPARC/64]: Prepare to remove of_platform_driver name.
  [SPARC32]: Add irqflags.h to sparc32 and use it from generic code.
  [SPARC64]: beautify vmlinux.lds
  [SPARC]: beautify vmlinux.lds
  [SPARC64]: Enable MSI on sun4u Fire PCI-E controllers.
  ...
parents 5dd80d5d 45b3f4cc
@@ -67,10 +67,12 @@ probe in an SBUS driver under Linux:
MODULE_DEVICE_TABLE(of, mydevice_match);
static struct of_platform_driver mydevice_driver = {
.name = "mydevice",
.match_table = mydevice_match,
.probe = mydevice_probe,
.remove = __devexit_p(mydevice_remove),
.driver = {
.name = "mydevice",
},
};
static int __init mydevice_init(void)
......
menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
source "lib/Kconfig.debug"
config DEBUG_STACK_USAGE
......
@@ -56,7 +56,7 @@
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */
unsigned long __local_irq_save(void)
unsigned long __raw_local_irq_save(void)
{
unsigned long retval;
unsigned long tmp;
@@ -74,7 +74,7 @@ unsigned long __local_irq_save(void)
return retval;
}
void local_irq_enable(void)
void raw_local_irq_enable(void)
{
unsigned long tmp;
@@ -89,7 +89,7 @@ void local_irq_enable(void)
: "memory");
}
void local_irq_restore(unsigned long old_psr)
void raw_local_irq_restore(unsigned long old_psr)
{
unsigned long tmp;
@@ -105,9 +105,9 @@ void local_irq_restore(unsigned long old_psr)
: "memory");
}
EXPORT_SYMBOL(__local_irq_save);
EXPORT_SYMBOL(local_irq_enable);
EXPORT_SYMBOL(local_irq_restore);
EXPORT_SYMBOL(__raw_local_irq_save);
EXPORT_SYMBOL(raw_local_irq_enable);
EXPORT_SYMBOL(raw_local_irq_restore);
/*
* Dave Redman (djhr@tadpole.co.uk)
......
@@ -588,7 +588,10 @@ __setup("of_debug=", of_debug);
int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
{
/* initialize common driver fields */
drv->driver.name = drv->name;
if (!drv->driver.name)
drv->driver.name = drv->name;
if (!drv->driver.owner)
drv->driver.owner = drv->owner;
drv->driver.bus = bus;
/* register with core */
......
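The hunk above makes the driver.name/driver.owner copy in of_register_driver() conditional, so a driver may now fill in the embedded struct device_driver itself. A sketch of the two registration styles this permits (illustrative only; the mydevice_* names come from the documentation hunk above):

    /* Old style: of_register_driver() copies drv->name into
     * drv->driver.name at registration time.
     */
    static struct of_platform_driver mydevice_driver_old = {
            .name           = "mydevice",
            .match_table    = mydevice_match,
            .probe          = mydevice_probe,
    };

    /* New style: the embedded struct device_driver is initialised
     * directly and the new fallback leaves it untouched.
     */
    static struct of_platform_driver mydevice_driver_new = {
            .match_table    = mydevice_match,
            .probe          = mydevice_probe,
            .driver         = {
                    .name   = "mydevice",
            },
    };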
@@ -347,9 +347,11 @@ static struct of_device_id clock_match[] = {
};
static struct of_platform_driver clock_driver = {
.name = "clock",
.match_table = clock_match,
.probe = clock_probe,
.driver = {
.name = "clock",
},
};
......
/* ld script to make SparcLinux kernel */
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc")
OUTPUT_ARCH(sparc)
@@ -8,84 +9,104 @@ ENTRY(_start)
jiffies = jiffies_64 + 4;
SECTIONS
{
. = 0x10000 + SIZEOF_HEADERS;
.text 0xf0004000 :
{
_text = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
*(.gnu.warning)
} =0
_etext = .;
PROVIDE (etext = .);
RODATA
.data :
{
DATA_DATA
CONSTRUCTORS
}
.data1 : { *(.data1) }
_edata = .;
PROVIDE (edata = .);
__start___fixup = .;
.fixup : { *(.fixup) }
__stop___fixup = .;
__start___ex_table = .;
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
. = 0x10000 + SIZEOF_HEADERS;
.text 0xf0004000 :
{
_text = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
*(.gnu.warning)
} = 0
_etext = .;
PROVIDE (etext = .);
RODATA
.data : {
DATA_DATA
CONSTRUCTORS
}
.data1 : {
*(.data1)
}
_edata = .;
PROVIDE (edata = .);
NOTES
.fixup : {
__start___fixup = .;
*(.fixup)
__stop___fixup = .;
}
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
. = ALIGN(4096);
__init_begin = .;
_sinittext = .;
.init.text : {
*(.init.text)
}
_einittext = .;
__init_text_end = .;
.init.data : { *(.init.data) }
. = ALIGN(16);
__setup_start = .;
.init.setup : { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : {
INITCALLS
}
__initcall_end = .;
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
SECURITY_INIT
NOTES
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
__init_text_end = .;
.init.data : {
*(.init.data)
}
. = ALIGN(16);
.init.setup : {
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
.initcall.init : {
__initcall_start = .;
INITCALLS
__initcall_end = .;
}
.con_initcall.init : {
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
}
SECURITY_INIT
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(PAGE_SIZE);
.init.ramfs : {
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
}
#endif
PERCPU(4096)
. = ALIGN(4096);
__init_end = .;
. = ALIGN(32);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
__bss_start = .;
.sbss : { *(.sbss) *(.scommon) }
.bss :
{
*(.dynbss)
*(.bss)
*(COMMON)
}
_end = . ;
PROVIDE (end = .);
/DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
. = ALIGN(32);
.data.cacheline_aligned : {
*(.data.cacheline_aligned)
}
STABS_DEBUG
__bss_start = .;
.sbss : {
*(.sbss)
*(.scommon)
}
.bss : {
*(.dynbss)
*(.bss)
*(COMMON)
}
_end = . ;
PROVIDE (end = .);
/DISCARD/ : {
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
}
DWARF_DEBUG
STABS_DEBUG
DWARF_DEBUG
}
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.23-rc6
# Sun Sep 16 09:52:11 2007
# Linux kernel version: 2.6.23
# Sat Oct 13 21:53:54 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -69,7 +69,6 @@ CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_VM_EVENT_COUNTERS=y
@@ -89,6 +88,7 @@ CONFIG_KMOD=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BLK_DEV_BSG=y
CONFIG_BLOCK_COMPAT=y
#
# IO Schedulers
@@ -111,6 +111,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
# CONFIG_SMP is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TABLE=m
@@ -119,6 +120,8 @@ CONFIG_CPU_FREQ_STAT=m
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
@@ -213,6 +216,7 @@ CONFIG_INET_TUNNEL=y
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
CONFIG_INET_LRO=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
@@ -304,6 +308,7 @@ CONFIG_NET_TCPPROBE=m
#
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_FW_LOADER=y
@@ -355,6 +360,11 @@ CONFIG_IDE_PROC_FS=y
# IDE chipset support/bugfixes
#
CONFIG_IDE_GENERIC=y
# CONFIG_BLK_DEV_PLATFORM is not set
#
# PCI IDE chipsets support
#
CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
CONFIG_IDEPCI_PCIBUS_ORDER=y
@@ -391,7 +401,6 @@ CONFIG_BLK_DEV_ALI15X3=y
# CONFIG_BLK_DEV_TC86C001 is not set
# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
# CONFIG_BLK_DEV_HD is not set
#
@@ -505,6 +514,8 @@ CONFIG_DUMMY=m
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_VETH is not set
# CONFIG_IP1000 is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
@@ -518,13 +529,16 @@ CONFIG_CASSINI=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_DGRS is not set
# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
@@ -543,6 +557,7 @@ CONFIG_NETDEV_1000=y
CONFIG_E1000=m
CONFIG_E1000_NAPI=y
# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
# CONFIG_E1000E is not set
# CONFIG_MYRI_SBUS is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
@@ -560,11 +575,14 @@ CONFIG_BNX2=m
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_NIU is not set
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_TR is not set
#
@@ -819,6 +837,12 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
#
# Sonics Silicon Backplane
#
CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
@@ -1399,6 +1423,7 @@ CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_MANAGER=y
@@ -1417,6 +1442,7 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_XTS=m
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_FCRYPT=m
@@ -1431,11 +1457,13 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_HW=y
#
......
@@ -18,6 +18,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o \
pci_psycho.o pci_sabre.o pci_schizo.o \
pci_sun4v.o pci_sun4v_asm.o pci_fire.o
obj-$(CONFIG_PCI_MSI) += pci_msi.o
obj-$(CONFIG_SMP) += smp.o trampoline.o hvtramp.o
obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
......
@@ -148,9 +148,11 @@ static int __devinit auxio_probe(struct of_device *dev, const struct of_device_i
}
static struct of_platform_driver auxio_driver = {
.name = "auxio",
.match_table = auxio_match,
.probe = auxio_probe,
.driver = {
.name = "auxio",
},
};
static int __init auxio_init(void)
......
@@ -429,16 +429,16 @@ do_ivec:
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
sethi %hi(ivector_table), %g2
sllx %g3, 3, %g3
or %g2, %lo(ivector_table), %g2
sethi %hi(ivector_table_pa), %g2
ldx [%g2 + %lo(ivector_table_pa)], %g2
sllx %g3, 4, %g3
add %g2, %g3, %g3
TRAP_LOAD_IRQ_WORK(%g6, %g1)
TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
lduw [%g6], %g5 /* g5 = irq_work(cpu) */
stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
stw %g3, [%g6] /* irq_work(cpu) = bucket */
ldx [%g6], %g5
stxa %g5, [%g3] ASI_PHYS_USE_EC
stx %g3, [%g6]
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
retry
do_ivec_xcall:
......
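The shift change in the hunk above (sllx %g3, 3 becomes sllx %g3, 4) follows from the new bucket layout later in this diff: struct ino_bucket grows from 8 bytes (a 32-bit irq_chain plus a 32-bit virt_irq) to 16 bytes (a 64-bit __irq_chain_pa, a 32-bit __virt_irq, and 32 bits of padding), so the INO-to-bucket index is now scaled by 16 rather than 8, and the table base is loaded from ivector_table_pa because the buckets are reached by physical address.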
@@ -21,7 +21,6 @@
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -43,6 +42,7 @@
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
/* UPA nodes send interrupt packet to UltraSparc with first data reg
* value low 5 (7 on Starfire) bits holding the IRQ identifier being
@@ -52,86 +52,128 @@
* To make processing these packets efficient and race free we use
* an array of irq buckets below. The interrupt vector handler in
* entry.S feeds incoming packets into per-cpu pil-indexed lists.
* The IVEC handler does not need to act atomically, the PIL dispatch
* code uses CAS to get an atomic snapshot of the list and clear it
* at the same time.
*
* If you make changes to ino_bucket, please update hand coded assembler
* of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
*/
struct ino_bucket {
/* Next handler in per-CPU IRQ worklist. We know that
* bucket pointers have the high 32-bits clear, so to
* save space we only store the bits we need.
*/
/*0x00*/unsigned int irq_chain;
/*0x00*/unsigned long __irq_chain_pa;
/* Virtual interrupt number assigned to this INO. */
/*0x04*/unsigned int virt_irq;
/*0x08*/unsigned int __virt_irq;
/*0x0c*/unsigned int __pad;
};
#define NUM_IVECS (IMAP_INR + 1)
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
#define __irq_ino(irq) \
(((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
/* This has to be in the main kernel image, it cannot be
* turned into per-cpu data. The reason is that the main
* kernel image is locked into the TLB and this structure
* is accessed from the vectored interrupt trap handler. If
* access to this structure takes a TLB miss it could cause
* the 5-level sparc v9 trap stack to overflow.
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
/* On several sun4u processors, it is illegal to mix bypass and
* non-bypass accesses. Therefore we access all INO buckets
* using bypass accesses only.
*/
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
__irq_chain_pa)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
__asm__ __volatile__("stxa %%g0, [%0] %1"
: /* no outputs */
: "r" (bucket_pa +
offsetof(struct ino_bucket,
__irq_chain_pa)),
"i" (ASI_PHYS_USE_EC));
}
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
unsigned int ret;
__asm__ __volatile__("lduwa [%1] %2, %0"
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
__virt_irq)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
static void bucket_set_virt_irq(unsigned long bucket_pa,
unsigned int virt_irq)
{
__asm__ __volatile__("stwa %0, [%1] %2"
: /* no outputs */
: "r" (virt_irq),
"r" (bucket_pa +
offsetof(struct ino_bucket,
__virt_irq)),
"i" (ASI_PHYS_USE_EC));
}
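Taken together these accessors guarantee that ino_bucket fields are only ever touched through physical addresses. A usage sketch, mirroring what build_irq() does later in this patch (illustrative only, not part of the commit):

    /* Map an INO to its virtual IRQ via the bucket's physical
     * address; a virtual-address dereference of ivector_table[]
     * would mix bypass and non-bypass accesses.
     */
    static unsigned int example_ino_to_virt_irq(unsigned long ino)
    {
            struct ino_bucket *bucket = &ivector_table[ino];

            return bucket_get_virt_irq(__pa(bucket));
    }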
#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
static struct {
unsigned int irq;
unsigned int dev_handle;
unsigned int dev_ino;
} virt_to_real_irq_table[NR_IRQS];
unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
static unsigned char virt_irq_alloc(unsigned int real_irq)
unsigned char virt_irq_alloc(unsigned int dev_handle,
unsigned int dev_ino)
{
unsigned long flags;
unsigned char ent;
BUILD_BUG_ON(NR_IRQS >= 256);
spin_lock_irqsave(&virt_irq_alloc_lock, flags);
for (ent = 1; ent < NR_IRQS; ent++) {
if (!virt_to_real_irq_table[ent].irq)
if (!virt_irq_table[ent].in_use)
break;
}
if (ent >= NR_IRQS) {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
return 0;
ent = 0;
} else {
virt_irq_table[ent].dev_handle = dev_handle;
virt_irq_table[ent].dev_ino = dev_ino;
virt_irq_table[ent].in_use = 1;
}
virt_to_real_irq_table[ent].irq = real_irq;
spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
return ent;
}
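Note the allocation protocol: slot 0 is never handed out, so a return of 0 doubles as the failure value, and the BUILD_BUG_ON keeps NR_IRQS below 256 because the result must fit in an unsigned char. A sketch of the caller-side contract (compare build_irq() below):

    /* Illustrative:
     *
     *      unsigned char virt_irq = virt_irq_alloc(0, ino);
     *      if (!virt_irq)
     *              ... table exhausted, 0 means "no virtual IRQ" ...
     */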
#ifdef CONFIG_PCI_MSI
static void virt_irq_free(unsigned int virt_irq)
void virt_irq_free(unsigned int virt_irq)
{
unsigned int real_irq;
unsigned long flags;
if (virt_irq >= NR_IRQS)
return;
real_irq = virt_to_real_irq_table[virt_irq].irq;
virt_to_real_irq_table[virt_irq].irq = 0;
spin_lock_irqsave(&virt_irq_alloc_lock, flags);
__bucket(real_irq)->virt_irq = 0;
}
#endif
virt_irq_table[virt_irq].in_use = 0;
static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
return virt_to_real_irq_table[virt_irq].irq;
spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif
/*
* /proc/interrupts printing:
@@ -217,38 +259,8 @@ struct irq_handler_data {
void (*pre_handler)(unsigned int, void *, void *);
void *pre_handler_arg1;
void *pre_handler_arg2;
u32 msi;
};
void sparc64_set_msi(unsigned int virt_irq, u32 msi)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
if (data)
data->msi = msi;
}
u32 sparc64_get_msi(unsigned int virt_irq)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
if (data)
return data->msi;
return 0xffffffff;
}
static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
unsigned int real_irq = virt_to_real_irq(virt_irq);
struct ino_bucket *bucket = NULL;
if (likely(real_irq))
bucket = __bucket(real_irq);
return bucket;
}
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
@@ -348,201 +360,152 @@ static void sun4u_irq_end(unsigned int virt_irq)
static void sun4v_irq_enable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned int ino = bucket - &ivector_table[0];
if (likely(bucket)) {
unsigned long cpuid;
int err;
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(virt_irq);
int err;
cpuid = irq_choose_cpu(virt_irq);
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
ino, err);
}
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
ino, err);
}
static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned int ino = bucket - &ivector_table[0];
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(virt_irq);
int err;
if (likely(bucket)) {
unsigned long cpuid;
int err;
cpuid = irq_choose_cpu(virt_irq);
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);
}
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);
}
static void sun4v_irq_disable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned int ino = bucket - &ivector_table[0];
if (likely(bucket)) {
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setenabled(%x): "
"err(%d)\n", ino, err);
}
}
#ifdef CONFIG_PCI_MSI
static void sun4v_msi_enable(unsigned int virt_irq)
{
sun4v_irq_enable(virt_irq);
unmask_msi_irq(virt_irq);
}
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
int err;
static void sun4v_msi_disable(unsigned int virt_irq)
{
mask_msi_irq(virt_irq);
sun4v_irq_disable(virt_irq);
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setenabled(%x): "
"err(%d)\n", ino, err);
}
#endif
static void sun4v_irq_end(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned int ino = bucket - &ivector_table[0];
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
struct irq_desc *desc = irq_desc + virt_irq;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
if (likely(bucket)) {
int err;
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
}
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
}
static void sun4v_virq_enable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
if (likely(bucket)) {
unsigned long cpuid, dev_handle, dev_ino;
int err;
cpuid = irq_choose_cpu(virt_irq);
dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_ENABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_ENABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
unsigned long cpuid, dev_handle, dev_ino;
int err;
cpuid = irq_choose_cpu(virt_irq);
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_ENABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_ENABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned long cpuid, dev_handle, dev_ino;
int err;
if (likely(bucket)) {
unsigned long cpuid, dev_handle, dev_ino;
int err;
cpuid = irq_choose_cpu(virt_irq);
cpuid = irq_choose_cpu(virt_irq);
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
}
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
}
static void sun4v_virq_disable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned long dev_handle, dev_ino;
int err;
if (likely(bucket)) {
unsigned long dev_handle, dev_ino;
int err;
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_DISABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_DISABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
static void sun4v_virq_end(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
struct irq_desc *desc = irq_desc + virt_irq;
unsigned long dev_handle, dev_ino;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
if (likely(bucket)) {
unsigned long dev_handle, dev_ino;
int err;
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
}
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
}
static void run_pre_handler(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
unsigned int ino;
ino = virt_irq_table[virt_irq].dev_ino;
if (likely(data->pre_handler)) {
data->pre_handler(__irq_ino(__irq(bucket)),
data->pre_handler(ino,
data->pre_handler_arg1,
data->pre_handler_arg2);
}
@@ -573,28 +536,6 @@ static struct irq_chip sun4v_irq = {
.set_affinity = sun4v_set_affinity,
};
static struct irq_chip sun4v_irq_ack = {
.typename = "sun4v+ack",
.enable = sun4v_irq_enable,
.disable = sun4v_irq_disable,
.ack = run_pre_handler,
.end = sun4v_irq_end,
.set_affinity = sun4v_set_affinity,
};
#ifdef CONFIG_PCI_MSI
static struct irq_chip sun4v_msi = {
.typename = "sun4v+msi",
.mask = mask_msi_irq,
.unmask = unmask_msi_irq,
.enable = sun4v_msi_enable,
.disable = sun4v_msi_disable,
.ack = run_pre_handler,
.end = sun4v_irq_end,
.set_affinity = sun4v_set_affinity,
};
#endif
static struct irq_chip sun4v_virq = {
.typename = "vsun4v",
.enable = sun4v_virq_enable,
@@ -603,59 +544,48 @@ static struct irq_chip sun4v_virq = {
.set_affinity = sun4v_virt_set_affinity,
};
static struct irq_chip sun4v_virq_ack = {
.typename = "vsun4v+ack",
.enable = sun4v_virq_enable,
.disable = sun4v_virq_disable,
.ack = run_pre_handler,
.end = sun4v_virq_end,
.set_affinity = sun4v_virt_set_affinity,
};
void irq_install_pre_handler(int virt_irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
struct irq_chip *chip;
struct irq_chip *chip = get_irq_chip(virt_irq);
if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
printk(KERN_ERR "IRQ: Trying to install pre-handler on "
"sun4v irq %u\n", virt_irq);
return;
}
data->pre_handler = func;
data->pre_handler_arg1 = arg1;
data->pre_handler_arg2 = arg2;
chip = get_irq_chip(virt_irq);
if (chip == &sun4u_irq_ack ||
chip == &sun4v_irq_ack ||
chip == &sun4v_virq_ack
#ifdef CONFIG_PCI_MSI
|| chip == &sun4v_msi
#endif
)
if (chip == &sun4u_irq_ack)
return;
chip = (chip == &sun4u_irq ?
&sun4u_irq_ack :
(chip == &sun4v_irq ?
&sun4v_irq_ack : &sun4v_virq_ack));
set_irq_chip(virt_irq, chip);
set_irq_chip(virt_irq, &sun4u_irq_ack);
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
struct ino_bucket *bucket;
struct irq_handler_data *data;
unsigned int virt_irq;
int ino;
BUG_ON(tlb_type == hypervisor);
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
if (!bucket->virt_irq) {
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
set_irq_chip(bucket->virt_irq, &sun4u_irq);
virt_irq = bucket_get_virt_irq(__pa(bucket));
if (!virt_irq) {
virt_irq = virt_irq_alloc(0, ino);
bucket_set_virt_irq(__pa(bucket), virt_irq);
set_irq_chip(virt_irq, &sun4u_irq);
}
data = get_irq_chip_data(bucket->virt_irq);
data = get_irq_chip_data(virt_irq);
if (unlikely(data))
goto out;
@@ -664,13 +594,13 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
set_irq_chip_data(bucket->virt_irq, data);
set_irq_chip_data(virt_irq, data);
data->imap = imap;
data->iclr = iclr;
out:
return bucket->virt_irq;
return virt_irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
@@ -678,16 +608,19 @@ static unsigned int sun4v_build_common(unsigned long sysino,
{
struct ino_bucket *bucket;
struct irq_handler_data *data;
unsigned int virt_irq;
BUG_ON(tlb_type != hypervisor);
bucket = &ivector_table[sysino];
if (!bucket->virt_irq) {
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
set_irq_chip(bucket->virt_irq, chip);
virt_irq = bucket_get_virt_irq(__pa(bucket));
if (!virt_irq) {
virt_irq = virt_irq_alloc(0, sysino);
bucket_set_virt_irq(__pa(bucket), virt_irq);
set_irq_chip(virt_irq, chip);
}
data = get_irq_chip_data(bucket->virt_irq);
data = get_irq_chip_data(virt_irq);
if (unlikely(data))
goto out;
@@ -696,7 +629,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
set_irq_chip_data(bucket->virt_irq, data);
set_irq_chip_data(virt_irq, data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
@@ -706,7 +639,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
data->iclr = ~0UL;
out:
return bucket->virt_irq;
return virt_irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -718,86 +651,52 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
unsigned long sysino, hv_err;
unsigned int virq;
BUG_ON(devhandle & devino);
sysino = devhandle | devino;
BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO));
hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
if (hv_err) {
prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
"err=%lu\n", devhandle, devino, hv_err);
prom_halt();
}
virq = sun4v_build_common(sysino, &sun4v_virq);
virt_to_real_irq_table[virq].dev_handle = devhandle;
virt_to_real_irq_table[virq].dev_ino = devino;
return virq;
}
#ifdef CONFIG_PCI_MSI
unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
unsigned int msi_start, unsigned int msi_end)
{
struct ino_bucket *bucket;
struct irq_handler_data *data;
unsigned long sysino;
unsigned int devino;
BUG_ON(tlb_type != hypervisor);
/* Find a free devino in the given range. */
for (devino = msi_start; devino < msi_end; devino++) {
sysino = sun4v_devino_to_sysino(devhandle, devino);
bucket = &ivector_table[sysino];
if (!bucket->virt_irq)
break;
}
if (devino >= msi_end)
return -ENOSPC;
struct ino_bucket *bucket;
unsigned long hv_err, cookie;
unsigned int virt_irq;
sysino = sun4v_devino_to_sysino(devhandle, devino);
bucket = &ivector_table[sysino];
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
*virt_irq_p = bucket->virt_irq;
set_irq_chip(bucket->virt_irq, &sun4v_msi);
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
if (unlikely(!bucket))
return 0;
__flush_dcache_range((unsigned long) bucket,
((unsigned long) bucket +
sizeof(struct ino_bucket)));
data = get_irq_chip_data(bucket->virt_irq);
if (unlikely(data))
return devino;
virt_irq = virt_irq_alloc(devhandle, devino);
bucket_set_virt_irq(__pa(bucket), virt_irq);
set_irq_chip(virt_irq, &sun4v_virq);
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data)) {
virt_irq_free(*virt_irq_p);
return -ENOMEM;
}
set_irq_chip_data(bucket->virt_irq, data);
if (unlikely(!data))
return 0;
set_irq_chip_data(virt_irq, data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
data->imap = ~0UL;
data->iclr = ~0UL;
return devino;
}
cookie = ~__pa(bucket);
hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
if (hv_err) {
prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
"err=%lu\n", devhandle, devino, hv_err);
prom_halt();
}
void sun4v_destroy_msi(unsigned int virt_irq)
{
virt_irq_free(virt_irq);
return virt_irq;
}
#endif
void ack_bad_irq(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned int ino = 0xdeadbeef;
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
if (bucket)
ino = bucket - &ivector_table[0];
if (!ino)
ino = 0xdeadbeef;
printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
ino, virt_irq);
@@ -805,7 +704,7 @@ void ack_bad_irq(unsigned int virt_irq)
void handler_irq(int irq, struct pt_regs *regs)
{
struct ino_bucket *bucket;
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
clear_softint(1 << irq);
@@ -813,15 +712,28 @@ void handler_irq(int irq, struct pt_regs *regs)
old_regs = set_irq_regs(regs);
irq_enter();
/* Sliiiick... */
bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
while (bucket) {
struct ino_bucket *next = __bucket(bucket->irq_chain);
/* Grab an atomic snapshot of the pending IVECs. */
__asm__ __volatile__("rdpr %%pstate, %0\n\t"
"wrpr %0, %3, %%pstate\n\t"
"ldx [%2], %1\n\t"
"stx %%g0, [%2]\n\t"
"wrpr %0, 0x0, %%pstate\n\t"
: "=&r" (pstate), "=&r" (bucket_pa)
: "r" (irq_work_pa(smp_processor_id())),
"i" (PSTATE_IE)
: "memory");
while (bucket_pa) {
unsigned long next_pa;
unsigned int virt_irq;
bucket->irq_chain = 0;
__do_IRQ(bucket->virt_irq);
next_pa = bucket_get_chain_pa(bucket_pa);
virt_irq = bucket_get_virt_irq(bucket_pa);
bucket_clear_chain_pa(bucket_pa);
bucket = next;
__do_IRQ(virt_irq);
bucket_pa = next_pa;
}
irq_exit();
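For readers without sparc64 assembly, the snapshot sequence above is roughly the following C; the essential property is that the load and the clear of the per-cpu worklist head happen with interrupt delivery blocked, so no incoming IVEC can be lost between the two accesses (pseudocode only; WRPR xors its operands, hence the IE toggle):

    /* Roughly equivalent C, for illustration:
     *
     *      pstate = read_pstate();
     *      write_pstate(pstate ^ PSTATE_IE);      toggle IE off
     *      bucket_pa = *irq_work_pa(this_cpu);    load worklist head (a PA)
     *      *irq_work_pa(this_cpu) = 0;            clear it
     *      write_pstate(pstate);                  restore PSTATE
     */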
@@ -921,7 +833,7 @@ void init_irqwork_curcpu(void)
{
int cpu = hard_smp_processor_id();
trap_block[cpu].irq_worklist = 0;
trap_block[cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and
@@ -1035,9 +947,21 @@ static struct irqaction timer_irq_action = {
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
unsigned long size;
map_prom_timers();
kill_prom_timer();
memset(&ivector_table[0], 0, sizeof(ivector_table));
size = sizeof(struct ino_bucket) * NUM_IVECS;
ivector_table = alloc_bootmem_low(size);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
}
__flush_dcache_range((unsigned long) ivector_table,
((unsigned long) ivector_table) + size);
ivector_table_pa = __pa(ivector_table);
if (tlb_type == hypervisor)
sun4v_init_mondo_queues();
......
@@ -872,7 +872,10 @@ __setup("of_debug=", of_debug);
int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
{
/* initialize common driver fields */
drv->driver.name = drv->name;
if (!drv->driver.name)
drv->driver.name = drv->name;
if (!drv->driver.owner)
drv->driver.owner = drv->owner;
drv->driver.bus = bus;
/* register with core */
......
@@ -29,8 +29,6 @@
#include "pci_impl.h"
unsigned long pci_memspace_mask = 0xffffffffUL;
#ifndef CONFIG_PCI
/* A "nop" PCI implementation. */
asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
@@ -1066,8 +1064,8 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
return 0;
}
/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
* to the 32-bit pci bus offset for DEV requested by the user.
/* Adjust vm_pgoff of VMA such that it is the physical page offset
* corresponding to the 32-bit pci bus offset for DEV requested by the user.
*
* Basically, the user finds the base address for his device which he wishes
* to mmap. They read the 32-bit value from the config space base register,
@@ -1076,21 +1074,35 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
*
* Returns negative error code on failure, zero on success.
*/
static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
static int __pci_mmap_make_offset(struct pci_dev *pdev,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long user32 = user_offset & pci_memspace_mask;
unsigned long largest_base, this_base, addr32;
int i;
unsigned long user_paddr, user_size;
int i, err;
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
return __pci_mmap_make_offset_bus(dev, vma, mmap_state);
/* First compute the physical address in vma->vm_pgoff,
* making sure the user offset is within range in the
* appropriate PCI space.
*/
err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
if (err)
return err;
/* If this is a mapping on a host bridge, any address
* is OK.
*/
if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
return err;
/* Otherwise make sure it's in the range for one of the
* device's resources.
*/
user_paddr = vma->vm_pgoff << PAGE_SHIFT;
user_size = vma->vm_end - vma->vm_start;
/* Figure out which base address this is for. */
largest_base = 0UL;
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
struct resource *rp = &dev->resource[i];
struct resource *rp = &pdev->resource[i];
/* Active? */
if (!rp->flags)
@@ -1108,26 +1120,14 @@ static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vm
continue;
}
this_base = rp->start;
addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;
if (mmap_state == pci_mmap_io)
addr32 &= 0xffffff;
if (addr32 <= user32 && this_base > largest_base)
largest_base = this_base;
if ((rp->start <= user_paddr) &&
(user_paddr + user_size) <= (rp->end + 1UL))
break;
}
if (largest_base == 0UL)
if (i > PCI_ROM_RESOURCE)
return -EINVAL;
/* Now construct the final physical address. */
if (mmap_state == pci_mmap_io)
vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
else
vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);
return 0;
}
......
@@ -6,9 +6,12 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include "pci_impl.h"
@@ -84,6 +87,266 @@ static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
return 0;
}
#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
u64 word0;
#define MSIQ_WORD0_RESV 0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT 56
#define MSIQ_WORD0_LEN 0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT 46
#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT 32
#define MSIQ_WORD0_RID 0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT 16
#define MSIQ_WORD0_DATA0 0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT 0
#define MSIQ_TYPE_MSG 0x6
#define MSIQ_TYPE_MSI32 0xb
#define MSIQ_TYPE_MSI64 0xf
u64 word1;
#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT 16
#define MSIQ_WORD1_DATA1 0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT 0
u64 resv[6];
};
/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL
#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL
#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL
#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL
#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID 0x8000000000000000UL
#define MSI_MAP_EQWR_N 0x4000000000000000UL
#define MSI_MAP_EQNUM 0x000000000000003fUL
#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N 0x4000000000000000UL
#define IMONDO_DATA0 0x02C000UL
#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL
#define IMONDO_DATA1 0x02C008UL
#define IMONDO_DATA1_DATA 0xffffffffffffffffUL
#define MSI_32BIT_ADDR 0x034000UL
#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL
#define MSI_64BIT_ADDR 0x034008UL
#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL
static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head)
{
*head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
return 0;
}
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head, unsigned long *msi)
{
unsigned long type_fmt, type, msi_num;
struct pci_msiq_entry *base, *ep;
base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
ep = &base[*head];
if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
return 0;
type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
MSIQ_WORD0_FMT_TYPE_SHIFT);
type = (type_fmt >> 3);
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
return -EINVAL;
*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
MSIQ_WORD0_DATA0_SHIFT);
fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
MSI_CLEAR_EQWR_N);
/* Clear the entry. */
ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
/* Go to next entry in ring. */
(*head)++;
if (*head >= pbm->msiq_ent_count)
*head = 0;
return 1;
}
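The return convention here is what the generic layer keys off: positive means one entry was dequeued into *msi and *head advanced, zero means the queue is drained, negative means a malformed entry. A sketch of the consuming loop (this is essentially sparc64_msiq_interrupt() from pci_msi.c, shown in full below):

    /* Contract with the generic layer:
     *
     *      while ((err = ops->dequeue_msi(pbm, msiqid, &head, &msi)) > 0)
     *              dispatch the virtual IRQ mapped to msi;
     *      err == 0 -> queue empty, write head back via ops->set_head()
     *      err <  0 -> malformed entry, bail out
     */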
static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head)
{
fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
return 0;
}
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64)
{
u64 val;
val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
val &= ~(MSI_MAP_EQNUM);
val |= msiqid;
fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
MSI_CLEAR_EQWR_N);
val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
val |= MSI_MAP_VALID;
fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
return 0;
}
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
unsigned long msiqid;
u64 val;
val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
msiqid = (val & MSI_MAP_EQNUM);
val &= ~MSI_MAP_VALID;
fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
return 0;
}
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long pages, order, i;
order = get_order(512 * 1024);
pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
if (pages == 0UL) {
printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
order);
return -ENOMEM;
}
memset((char *)pages, 0, PAGE_SIZE << order);
pbm->msi_queues = (void *) pages;
fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
(EVENT_QUEUE_BASE_ADDR_ALL_ONES |
__pa(pbm->msi_queues)));
fire_write(pbm->pbm_regs + IMONDO_DATA0,
pbm->portid << 6);
fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);
fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
pbm->msi32_start);
fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
pbm->msi64_start);
for (i = 0; i < pbm->msiq_num; i++) {
fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
}
return 0;
}
static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
unsigned long pages, order;
order = get_order(512 * 1024);
pages = (unsigned long) pbm->msi_queues;
free_pages(pages, order);
pbm->msi_queues = NULL;
}
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
unsigned int virt_irq;
int fixup;
u64 val;
imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
/* XXX iterate amongst the 4 IRQ controllers XXX */
int_ctrlr = (1UL << 6);
val = fire_read(imap_reg);
val |= (1UL << 63) | int_ctrlr;
fire_write(imap_reg, val);
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
virt_irq = build_irq(fixup, iclr_reg, imap_reg);
if (!virt_irq)
return -ENOMEM;
fire_write(pbm->pbm_regs +
EVENT_QUEUE_CONTROL_SET(msiqid),
EVENT_QUEUE_CONTROL_SET_EN);
return virt_irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
.get_head = pci_fire_get_head,
.dequeue_msi = pci_fire_dequeue_msi,
.set_head = pci_fire_set_head,
.msi_setup = pci_fire_msi_setup,
.msi_teardown = pci_fire_msi_teardown,
.msiq_alloc = pci_fire_msiq_alloc,
.msiq_free = pci_fire_msiq_free,
.msiq_build_irq = pci_fire_msiq_build_irq,
};
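This ops table is the heart of the "Consolidate MSI support code" commit from the shortlog: the common layer in pci_msi.c below only ever reaches a controller through struct sparc64_msiq_ops, and the sun4v code at the end of this diff implements the same hooks (pci_sun4v_get_head(), pci_sun4v_dequeue_msi(), and so on) in terms of hypervisor calls instead of Fire register accesses, presumably collected into an analogous ops structure outside this excerpt.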
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL 0x470010UL
#define FIRE_PARITY_ENAB 0x8000000000000000UL
@@ -176,6 +439,7 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
{
const struct linux_prom64_registers *regs;
struct pci_pbm_info *pbm;
int err;
if ((portid & 1) == 0)
pbm = &p->pbm_A;
@@ -208,7 +472,13 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
pci_fire_hw_init(pbm);
return pci_fire_pbm_iommu_init(pbm);
err = pci_fire_pbm_iommu_init(pbm);
if (err)
return err;
pci_fire_msi_init(pbm);
return 0;
}
static inline int portid_compare(u32 x, u32 y)
@@ -249,13 +519,6 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
p->pbm_B.iommu = iommu;
/* XXX MSI support XXX */
/* Like PSYCHO and SCHIZO we have a 2GB aligned area
* for memory space.
*/
pci_memspace_mask = 0x7fffffffUL;
if (pci_fire_pbm_init(p, dp, portid))
goto fatal_memory_error;
......
@@ -29,6 +29,33 @@
#define PCI_STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
#ifdef CONFIG_PCI_MSI
struct pci_pbm_info;
struct sparc64_msiq_ops {
int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head);
int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head, unsigned long *msi);
int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head);
int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64);
int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi);
int (*msiq_alloc)(struct pci_pbm_info *pbm);
void (*msiq_free)(struct pci_pbm_info *pbm);
int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long devino);
};
extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops);
struct sparc64_msiq_cookie {
struct pci_pbm_info *pbm;
unsigned long msiqid;
};
#endif
struct pci_controller_info;
struct pci_pbm_info {
@@ -90,6 +117,8 @@ struct pci_pbm_info {
u32 msiq_ent_count;
u32 msiq_first;
u32 msiq_first_devino;
u32 msiq_rotor;
struct sparc64_msiq_cookie *msiq_irq_cookies;
u32 msi_num;
u32 msi_first;
u32 msi_data_mask;
@@ -100,9 +129,11 @@ struct pci_pbm_info {
u32 msi64_len;
void *msi_queues;
unsigned long *msi_bitmap;
unsigned int *msi_irq_table;
int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
struct msi_desc *entry);
void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
const struct sparc64_msiq_ops *msi_ops;
#endif /* !(CONFIG_PCI_MSI) */
/* This PBM's streaming buffer. */
@@ -126,7 +157,6 @@ struct pci_controller_info {
};
extern struct pci_pbm_info *pci_pbm_root;
extern unsigned long pci_memspace_mask;
extern int pci_num_pbms;
......
/* pci_msi.c: Sparc64 MSI support common layer.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include "pci_impl.h"
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
struct sparc64_msiq_cookie *msiq_cookie = cookie;
struct pci_pbm_info *pbm = msiq_cookie->pbm;
unsigned long msiqid = msiq_cookie->msiqid;
const struct sparc64_msiq_ops *ops;
unsigned long orig_head, head;
int err;
ops = pbm->msi_ops;
err = ops->get_head(pbm, msiqid, &head);
if (unlikely(err < 0))
goto err_get_head;
orig_head = head;
for (;;) {
unsigned long msi;
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0))
__do_IRQ(pbm->msi_irq_table[msi - pbm->msi_first]);
if (unlikely(err < 0))
goto err_dequeue;
if (err == 0)
break;
}
if (likely(head != orig_head)) {
err = ops->set_head(pbm, msiqid, head);
if (unlikely(err < 0))
goto err_set_head;
}
return IRQ_HANDLED;
err_get_head:
printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
msiqid, err);
goto err_out;
err_dequeue:
printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
"gives error %d\n",
head, msiqid, err);
goto err_out;
err_set_head:
printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
"gives error %d\n",
head, msiqid, err);
goto err_out;
err_out:
return IRQ_NONE;
}
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
static DEFINE_SPINLOCK(rotor_lock);
unsigned long flags;
u32 ret, rotor;
spin_lock_irqsave(&rotor_lock, flags);
rotor = pbm->msiq_rotor;
ret = pbm->msiq_first + rotor;
if (++rotor >= pbm->msiq_num)
rotor = 0;
pbm->msiq_rotor = rotor;
spin_unlock_irqrestore(&rotor_lock, flags);
return ret;
}
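A worked example with hypothetical values: with msiq_first == 8 and msiq_num == 4, successive pick_msiq() calls return 8, 9, 10, 11, 8, 9, ... so newly allocated MSIs are spread round-robin across the PBM's event queues, and rotor_lock keeps concurrent allocations from landing on the same queue.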
static int alloc_msi(struct pci_pbm_info *pbm)
{
int i;
for (i = 0; i < pbm->msi_num; i++) {
if (!test_and_set_bit(i, pbm->msi_bitmap))
return i + pbm->msi_first;
}
return -ENOENT;
}
static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
msi_num -= pbm->msi_first;
clear_bit(msi_num, pbm->msi_bitmap);
}
static struct irq_chip msi_irq = {
.typename = "PCI-MSI",
.mask = mask_msi_irq,
.unmask = unmask_msi_irq,
.enable = unmask_msi_irq,
.disable = mask_msi_irq,
/* XXX affinity XXX */
};
int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;
struct msi_msg msg;
int msi, err;
u32 msiqid;
*virt_irq_p = virt_irq_alloc(0, 0);
err = -ENOMEM;
if (!*virt_irq_p)
goto out_err;
set_irq_chip(*virt_irq_p, &msi_irq);
err = alloc_msi(pbm);
if (unlikely(err < 0))
goto out_virt_irq_free;
msi = err;
msiqid = pick_msiq(pbm);
err = ops->msi_setup(pbm, msiqid, msi,
(entry->msi_attrib.is_64 ? 1 : 0));
if (err)
goto out_msi_free;
pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
msg.address_lo = pbm->msi64_start & 0xffffffff;
} else {
msg.address_hi = 0;
msg.address_lo = pbm->msi32_start;
}
msg.data = msi;
set_irq_msi(*virt_irq_p, entry);
write_msi_msg(*virt_irq_p, &msg);
return 0;
out_msi_free:
free_msi(pbm, msi);
out_virt_irq_free:
set_irq_chip(*virt_irq_p, NULL);
virt_irq_free(*virt_irq_p);
*virt_irq_p = 0;
out_err:
return err;
}
void sparc64_teardown_msi_irq(unsigned int virt_irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;
unsigned int msi_num;
int i, err;
for (i = 0; i < pbm->msi_num; i++) {
if (pbm->msi_irq_table[i] == virt_irq)
break;
}
if (i >= pbm->msi_num) {
printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
pbm->name, virt_irq);
return;
}
msi_num = pbm->msi_first + i;
pbm->msi_irq_table[i] = ~0U;
err = ops->msi_teardown(pbm, msi_num);
if (err) {
printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
"irq %u, gives error %d\n",
pbm->name, msi_num, virt_irq, err);
return;
}
free_msi(pbm, msi_num);
set_irq_chip(virt_irq, NULL);
virt_irq_free(virt_irq);
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
unsigned long size, bits_per_ulong;
bits_per_ulong = sizeof(unsigned long) * 8;
size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
size /= 8;
BUG_ON(size % sizeof(unsigned long));
pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
if (!pbm->msi_bitmap)
return -ENOMEM;
return 0;
}
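The size arithmetic rounds the MSI count up to a whole number of unsigned longs before converting bits to bytes. A worked example, assuming a 64-bit kernel so bits_per_ulong == 64:

    /* msi_num = 100:
     *      size = (100 + 63) & ~63 = 128   bits, a whole number of words
     *      size /= 8               = 16    bytes, i.e. two unsigned longs
     */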
static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
kfree(pbm->msi_bitmap);
pbm->msi_bitmap = NULL;
}
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
int size, i;
size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
if (!pbm->msiq_irq_cookies)
return -ENOMEM;
for (i = 0; i < pbm->msiq_num; i++) {
struct sparc64_msiq_cookie *p;
p = &pbm->msiq_irq_cookies[i];
p->pbm = pbm;
p->msiqid = pbm->msiq_first + i;
}
size = pbm->msi_num * sizeof(unsigned int);
pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
if (!pbm->msi_irq_table) {
kfree(pbm->msiq_irq_cookies);
pbm->msiq_irq_cookies = NULL;
return -ENOMEM;
}
return 0;
}
static void msi_table_free(struct pci_pbm_info *pbm)
{
kfree(pbm->msiq_irq_cookies);
pbm->msiq_irq_cookies = NULL;
kfree(pbm->msi_irq_table);
pbm->msi_irq_table = NULL;
}
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops,
unsigned long msiqid,
unsigned long devino)
{
int irq = ops->msiq_build_irq(pbm, msiqid, devino);
int err;
if (irq < 0)
return irq;
err = request_irq(irq, sparc64_msiq_interrupt, 0,
"MSIQ",
&pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
if (err)
return err;
return 0;
}
static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops)
{
int i;
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long msiqid = i + pbm->msiq_first;
unsigned long devino = i + pbm->msiq_first_devino;
int err;
err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
if (err)
return err;
}
return 0;
}
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops)
{
const u32 *val;
int len;
val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_num = *val;
if (pbm->msiq_num) {
const struct msiq_prop {
u32 first_msiq;
u32 num_msiq;
u32 first_devino;
} *mqp;
const struct msi_range_prop {
u32 first_msi;
u32 num_msi;
} *mrng;
const struct addr_range_prop {
u32 msi32_high;
u32 msi32_low;
u32 msi32_len;
u32 msi64_high;
u32 msi64_low;
u32 msi64_len;
} *arng;
val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_ent_count = *val;
mqp = of_get_property(pbm->prom_node,
"msi-eq-to-devino", &len);
if (!mqp)
mqp = of_get_property(pbm->prom_node,
"msi-eq-devino", &len);
if (!mqp || len != sizeof(struct msiq_prop))
goto no_msi;
pbm->msiq_first = mqp->first_msiq;
pbm->msiq_first_devino = mqp->first_devino;
val = of_get_property(pbm->prom_node, "#msi", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_num = *val;
mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
if (!mrng || len != sizeof(struct msi_range_prop))
goto no_msi;
pbm->msi_first = mrng->first_msi;
val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_data_mask = *val;
val = of_get_property(pbm->prom_node, "msix-data-width", &len);
if (!val || len != 4)
goto no_msi;
pbm->msix_data_width = *val;
arng = of_get_property(pbm->prom_node, "msi-address-ranges",
&len);
if (!arng || len != sizeof(struct addr_range_prop))
goto no_msi;
pbm->msi32_start = ((u64)arng->msi32_high << 32) |
(u64) arng->msi32_low;
pbm->msi64_start = ((u64)arng->msi64_high << 32) |
(u64) arng->msi64_low;
pbm->msi32_len = arng->msi32_len;
pbm->msi64_len = arng->msi64_len;
if (msi_bitmap_alloc(pbm))
goto no_msi;
if (msi_table_alloc(pbm)) {
msi_bitmap_free(pbm);
goto no_msi;
}
if (ops->msiq_alloc(pbm)) {
msi_table_free(pbm);
msi_bitmap_free(pbm);
goto no_msi;
}
if (sparc64_bringup_msi_queues(pbm, ops)) {
ops->msiq_free(pbm);
msi_table_free(pbm);
msi_bitmap_free(pbm);
goto no_msi;
}
printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
"devino[0x%x]\n",
pbm->name,
pbm->msiq_first, pbm->msiq_num,
pbm->msiq_ent_count,
pbm->msiq_first_devino);
printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
"width[%u]\n",
pbm->name,
pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
pbm->msix_data_width);
printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
"addr64[0x%lx:0x%x]\n",
pbm->name,
pbm->msi32_start, pbm->msi32_len,
pbm->msi64_start, pbm->msi64_len);
printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
pbm->name,
__pa(pbm->msi_queues));
pbm->msi_ops = ops;
pbm->setup_msi_irq = sparc64_setup_msi_irq;
pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
}
return;
no_msi:
pbm->msiq_num = 0;
printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
@@ -1058,12 +1058,6 @@ void psycho_init(struct device_node *dp, char *model_name)
p->pbm_A.config_space = p->pbm_B.config_space =
(pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
/*
* Psycho's PCI MEM space is mapped to a 2GB aligned area, so
* we need to adjust our MEM space mask.
*/
pci_memspace_mask = 0x7fffffffUL;
psycho_controller_hwinit(&p->pbm_A);
if (psycho_iommu_init(&p->pbm_A))
......
@@ -1464,9 +1464,6 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
p->pbm_B.iommu = iommu;
/* Like PSYCHO we have a 2GB aligned area for memory space. */
pci_memspace_mask = 0x7fffffffUL;
if (schizo_pbm_init(p, dp, portid, chip_type))
goto fatal_memory_error;
......
......@@ -748,111 +748,102 @@ struct pci_sun4v_msiq_entry {
u64 reserved2;
};
/* For now this just runs as a pre-handler for the real interrupt handler.
* So we just walk through the queue and ACK all the entries, update the
* head pointer, and return.
*
* In the longer term it would be nice to do something more integrated
* wherein we can pass in some of this MSI info to the drivers. This
* would be most useful for PCIe fabric error messages, although we could
* invoke those directly from the loop here in order to pass the info around.
*/
static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head)
{
struct pci_pbm_info *pbm = data1;
struct pci_sun4v_msiq_entry *base, *ep;
unsigned long msiqid, orig_head, head, type, err;
msiqid = (unsigned long) data2;
unsigned long err, limit;
head = 0xdeadbeef;
err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
goto hv_error_get;
if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
goto bad_offset;
head /= sizeof(struct pci_sun4v_msiq_entry);
orig_head = head;
base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
(pbm->msiq_ent_count *
sizeof(struct pci_sun4v_msiq_entry))));
ep = &base[head];
while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
goto bad_type;
pci_sun4v_msi_setstate(pbm->devhandle,
ep->msi_data /* msi_num */,
HV_MSISTATE_IDLE);
/* Clear the entry. */
ep->version_type &= ~MSIQ_TYPE_MASK;
/* Go to next entry in ring. */
head++;
if (head >= pbm->msiq_ent_count)
head = 0;
ep = &base[head];
}
return -ENXIO;
if (likely(head != orig_head)) {
/* ACK entries by updating head pointer. */
head *= sizeof(struct pci_sun4v_msiq_entry);
err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
goto hv_error_set;
}
return;
limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
if (unlikely(*head >= limit))
return -EFBIG;
hv_error_set:
printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
goto hv_error_cont;
return 0;
}
hv_error_get:
printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
unsigned long msiqid, unsigned long *head,
unsigned long *msi)
{
struct pci_sun4v_msiq_entry *ep;
unsigned long err, type;
hv_error_cont:
printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
pbm->devhandle, msiqid, head);
return;
/* Note: void pointer arithmetic, 'head' is a byte offset */
ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
(pbm->msiq_ent_count *
sizeof(struct pci_sun4v_msiq_entry))) +
*head);
bad_offset:
printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
return;
if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
return 0;
bad_type:
printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
return;
type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
return -EINVAL;
*msi = ep->msi_data;
err = pci_sun4v_msi_setstate(pbm->devhandle,
ep->msi_data /* msi_num */,
HV_MSISTATE_IDLE);
if (unlikely(err))
return -ENXIO;
/* Clear the entry. */
ep->version_type &= ~MSIQ_TYPE_MASK;
(*head) += sizeof(struct pci_sun4v_msiq_entry);
if (*head >=
(pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
*head = 0;
return 1;
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head)
{
unsigned long size, bits_per_ulong;
unsigned long err;
bits_per_ulong = sizeof(unsigned long) * 8;
size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
size /= 8;
BUG_ON(size % sizeof(unsigned long));
err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
return -EINVAL;
pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
if (!pbm->msi_bitmap)
return -ENOMEM;
return 0;
}
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64)
{
if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
(is_msi64 ?
HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
return -ENXIO;
if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
return -ENXIO;
if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
return -ENXIO;
return 0;
}
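One thing worth noting about the ordering in pci_sun4v_msi_setup(): the MSI is first bound to its queue, then parked in the IDLE state, and only then marked valid, while the teardown path below invalidates the MSI before anything else; presumably this keeps a half-configured MSI from ever being delivered.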
static void msi_bitmap_free(struct pci_pbm_info *pbm)
static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
kfree(pbm->msi_bitmap);
pbm->msi_bitmap = NULL;
unsigned long err, msiqid;
err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
if (err)
return -ENXIO;
pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
return 0;
}
static int msi_queue_alloc(struct pci_pbm_info *pbm)
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long q_size, alloc_size, pages, order;
int i;
......@@ -906,232 +897,59 @@ static int msi_queue_alloc(struct pci_pbm_info *pbm)
return -EINVAL;
}
static int alloc_msi(struct pci_pbm_info *pbm)
static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
unsigned long q_size, alloc_size, pages, order;
int i;
for (i = 0; i < pbm->msi_num; i++) {
if (!test_and_set_bit(i, pbm->msi_bitmap))
return i + pbm->msi_first;
}
return -ENOENT;
}
static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
msi_num -= pbm->msi_first;
clear_bit(msi_num, pbm->msi_bitmap);
}
static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned long devino, msiqid;
struct msi_msg msg;
int msi_num, err;
*virt_irq_p = 0;
msi_num = alloc_msi(pbm);
if (msi_num < 0)
return msi_num;
err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
pbm->msiq_first_devino,
(pbm->msiq_first_devino +
pbm->msiq_num));
if (err < 0)
goto out_err;
devino = err;
msiqid = ((devino - pbm->msiq_first_devino) +
pbm->msiq_first);
err = -EINVAL;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
if (err)
goto out_err;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
goto out_err;
if (pci_sun4v_msi_setmsiq(pbm->devhandle,
msi_num, msiqid,
(entry->msi_attrib.is_64 ?
HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
goto out_err;
if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
goto out_err;
if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
goto out_err;
sparc64_set_msi(*virt_irq_p, msi_num);
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long msiqid = pbm->msiq_first + i;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
msg.address_lo = pbm->msi64_start & 0xffffffff;
} else {
msg.address_hi = 0;
msg.address_lo = pbm->msi32_start;
(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
}
msg.data = msi_num;
set_irq_msi(*virt_irq_p, entry);
write_msi_msg(*virt_irq_p, &msg);
irq_install_pre_handler(*virt_irq_p,
pci_sun4v_msi_prehandler,
pbm, (void *) msiqid);
q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
alloc_size = (pbm->msiq_num * q_size);
order = get_order(alloc_size);
return 0;
pages = (unsigned long) pbm->msi_queues;
out_err:
free_msi(pbm, msi_num);
return err;
free_pages(pages, order);
pbm->msi_queues = NULL;
}
static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
struct pci_dev *pdev)
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned long msiqid, err;
unsigned int msi_num;
msi_num = sparc64_get_msi(virt_irq);
err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
if (err) {
printk(KERN_ERR "%s: getmsiq gives error %lu\n",
pbm->name, err);
return;
}
unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);
if (!virt_irq)
return -ENOMEM;
free_msi(pbm, msi_num);
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
return -EINVAL;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
/* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
* allocation.
*/
sun4v_destroy_msi(virt_irq);
return virt_irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
.get_head = pci_sun4v_get_head,
.dequeue_msi = pci_sun4v_dequeue_msi,
.set_head = pci_sun4v_set_head,
.msi_setup = pci_sun4v_msi_setup,
.msi_teardown = pci_sun4v_msi_teardown,
.msiq_alloc = pci_sun4v_msiq_alloc,
.msiq_free = pci_sun4v_msiq_free,
.msiq_build_irq = pci_sun4v_msiq_build_irq,
};
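This ops table is everything the consolidated layer needs to service a queue. A minimal sketch of how a generic dispatcher could compose the three queue callbacks, assuming the return conventions visible above (dequeue_msi returns 1 per entry consumed, 0 when the queue is empty, negative on error):
/* Sketch only -- the real dispatcher lives in the shared MSI code. */
static int msiq_drain(struct pci_pbm_info *pbm,
		      const struct sparc64_msiq_ops *ops,
		      unsigned long msiqid)
{
	unsigned long head, orig_head, msi;
	int err;

	err = ops->get_head(pbm, msiqid, &head);
	if (err < 0)
		return err;

	orig_head = head;
	while ((err = ops->dequeue_msi(pbm, msiqid, &head, &msi)) > 0) {
		/* dispatch the virtual IRQ bound to 'msi' here */
	}

	if (err >= 0 && head != orig_head)
		err = ops->set_head(pbm, msiqid, head);	/* ACK consumed entries */
	return err;
}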
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
const u32 *val;
int len;
val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_num = *val;
if (pbm->msiq_num) {
const struct msiq_prop {
u32 first_msiq;
u32 num_msiq;
u32 first_devino;
} *mqp;
const struct msi_range_prop {
u32 first_msi;
u32 num_msi;
} *mrng;
const struct addr_range_prop {
u32 msi32_high;
u32 msi32_low;
u32 msi32_len;
u32 msi64_high;
u32 msi64_low;
u32 msi64_len;
} *arng;
val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_ent_count = *val;
mqp = of_get_property(pbm->prom_node,
"msi-eq-to-devino", &len);
if (!mqp || len != sizeof(struct msiq_prop))
goto no_msi;
pbm->msiq_first = mqp->first_msiq;
pbm->msiq_first_devino = mqp->first_devino;
val = of_get_property(pbm->prom_node, "#msi", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_num = *val;
mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
if (!mrng || len != sizeof(struct msi_range_prop))
goto no_msi;
pbm->msi_first = mrng->first_msi;
val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_data_mask = *val;
val = of_get_property(pbm->prom_node, "msix-data-width", &len);
if (!val || len != 4)
goto no_msi;
pbm->msix_data_width = *val;
arng = of_get_property(pbm->prom_node, "msi-address-ranges",
&len);
if (!arng || len != sizeof(struct addr_range_prop))
goto no_msi;
pbm->msi32_start = ((u64)arng->msi32_high << 32) |
(u64) arng->msi32_low;
pbm->msi64_start = ((u64)arng->msi64_high << 32) |
(u64) arng->msi64_low;
pbm->msi32_len = arng->msi32_len;
pbm->msi64_len = arng->msi64_len;
if (msi_bitmap_alloc(pbm))
goto no_msi;
if (msi_queue_alloc(pbm)) {
msi_bitmap_free(pbm);
goto no_msi;
}
printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
"devino[0x%x]\n",
pbm->name,
pbm->msiq_first, pbm->msiq_num,
pbm->msiq_ent_count,
pbm->msiq_first_devino);
printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
"width[%u]\n",
pbm->name,
pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
pbm->msix_data_width);
printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
"addr64[0x%lx:0x%x]\n",
pbm->name,
pbm->msi32_start, pbm->msi32_len,
pbm->msi64_start, pbm->msi64_len);
printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
pbm->name,
pbm->msi_queues);
}
pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
return;
no_msi:
pbm->msiq_num = 0;
printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
......@@ -1237,11 +1055,6 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
p->pbm_B.iommu = iommu;
/* Like PSYCHO and SCHIZO we have a 2GB aligned area
* for memory space.
*/
pci_memspace_mask = 0x7fffffffUL;
pci_sun4v_pbm_init(p, dp, devhandle);
return;
......
......@@ -105,9 +105,11 @@ static struct of_device_id power_match[] = {
};
static struct of_platform_driver power_driver = {
.name = "power",
.match_table = power_match,
.probe = power_probe,
.driver = {
.name = "power",
},
};
void __init power_init(void)
......
......@@ -96,19 +96,21 @@ sun4v_dev_mondo:
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
/* Get &__irq_work[smp_processor_id()] into %g1. */
TRAP_LOAD_IRQ_WORK(%g1, %g4)
TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
/* Get &ivector_table[IVEC] into %g4. */
sethi %hi(ivector_table), %g4
sllx %g3, 3, %g3
or %g4, %lo(ivector_table), %g4
/* For VIRQs, cookie is encoded as ~bucket_phys_addr */
brlz,pt %g3, 1f
xnor %g3, %g0, %g4
/* Get __pa(&ivector_table[IVEC]) into %g4. */
sethi %hi(ivector_table_pa), %g4
ldx [%g4 + %lo(ivector_table_pa)], %g4
sllx %g3, 4, %g3
add %g4, %g3, %g4
/* Insert ivector_table[] entry into __irq_work[] queue. */
lduw [%g1], %g2 /* g2 = irq_work(cpu) */
stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */
stw %g4, [%g1] /* irq_work(cpu) = bucket */
1: ldx [%g1], %g2
stxa %g2, [%g4] ASI_PHYS_USE_EC
stx %g4, [%g1]
/* Signal the interrupt by setting (1 << pil) in %softint. */
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
......
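Restated in C, the cookie convention that the brlz/xnor pair above implements is roughly this (a sketch; the variable names are hypothetical):
	unsigned long cookie = ~bucket_pa;	/* encode: a VIRQ cookie is the NOT of the
						 * bucket's physical address, hence negative */
	if ((long)cookie < 0)			/* brlz: negative means VIRQ cookie */
		bucket_pa = ~cookie;		/* xnor with %g0 is bitwise NOT: decode */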
......@@ -436,7 +436,7 @@ asmlinkage long sparc_pipe(struct pt_regs *regs)
asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
unsigned long third, void __user *ptr, long fifth)
{
int err;
long err;
/* No need for backward compatibility. We can start fresh... */
if (call <= SEMCTL) {
......@@ -453,16 +453,9 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
err = sys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
union semun fourth;
err = -EINVAL;
if (!ptr)
goto out;
err = -EFAULT;
if (get_user(fourth.__pad,
(void __user * __user *) ptr))
goto out;
err = sys_semctl(first, (int)second | IPC_64,
(int)third, fourth);
err = sys_semctl(first, third,
(int)second | IPC_64,
(union semun) ptr);
goto out;
}
default:
......
......@@ -764,9 +764,11 @@ static struct of_device_id clock_match[] = {
};
static struct of_platform_driver clock_driver = {
.name = "clock",
.match_table = clock_match,
.probe = clock_probe,
.driver = {
.name = "clock",
},
};
static int __init clock_init(void)
......
......@@ -2569,8 +2569,8 @@ void __init trap_init(void)
offsetof(struct trap_per_cpu, tsb_huge)) ||
(TRAP_PER_CPU_TSB_HUGE_TEMP !=
offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
(TRAP_PER_CPU_IRQ_WORKLIST !=
offsetof(struct trap_per_cpu, irq_worklist)) ||
(TRAP_PER_CPU_IRQ_WORKLIST_PA !=
offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
(TRAP_PER_CPU_CPU_MONDO_QMASK !=
offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
(TRAP_PER_CPU_DEV_MONDO_QMASK !=
......
......@@ -10,105 +10,138 @@ ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
swapper_low_pmd_dir = 0x0000000000402000;
. = 0x4000;
.text 0x0000000000404000 :
{
_text = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.warning)
} =0
_etext = .;
PROVIDE (etext = .);
swapper_low_pmd_dir = 0x0000000000402000;
. = 0x4000;
.text 0x0000000000404000 : {
_text = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.warning)
} = 0
_etext = .;
PROVIDE (etext = .);
RO_DATA(PAGE_SIZE)
RO_DATA(PAGE_SIZE)
.data : {
DATA_DATA
CONSTRUCTORS
}
.data1 : {
*(.data1)
}
. = ALIGN(64);
.data.cacheline_aligned : {
*(.data.cacheline_aligned)
}
. = ALIGN(64);
.data.read_mostly : {
*(.data.read_mostly)
}
_edata = .;
PROVIDE (edata = .);
.fixup : {
*(.fixup)
}
. = ALIGN(16);
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
NOTES
.data :
{
DATA_DATA
CONSTRUCTORS
}
.data1 : { *(.data1) }
. = ALIGN(64);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
. = ALIGN(64);
.data.read_mostly : { *(.data.read_mostly) }
_edata = .;
PROVIDE (edata = .);
.fixup : { *(.fixup) }
. = ALIGN(PAGE_SIZE);
.init.text : {
__init_begin = .;
_sinittext = .;
*(.init.text)
_einittext = .;
}
.init.data : {
*(.init.data)
}
. = ALIGN(16);
.init.setup : {
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
.initcall.init : {
__initcall_start = .;
INITCALLS
__initcall_end = .;
}
.con_initcall.init : {
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
}
SECURITY_INIT
. = ALIGN(16);
__start___ex_table = .;
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
. = ALIGN(4);
.tsb_ldquad_phys_patch : {
__tsb_ldquad_phys_patch = .;
*(.tsb_ldquad_phys_patch)
__tsb_ldquad_phys_patch_end = .;
}
NOTES
.tsb_phys_patch : {
__tsb_phys_patch = .;
*(.tsb_phys_patch)
__tsb_phys_patch_end = .;
}
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
.init.data : { *(.init.data) }
. = ALIGN(16);
__setup_start = .;
.init.setup : { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : {
INITCALLS
}
__initcall_end = .;
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
SECURITY_INIT
. = ALIGN(4);
__tsb_ldquad_phys_patch = .;
.tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) }
__tsb_ldquad_phys_patch_end = .;
__tsb_phys_patch = .;
.tsb_phys_patch : { *(.tsb_phys_patch) }
__tsb_phys_patch_end = .;
__cpuid_patch = .;
.cpuid_patch : { *(.cpuid_patch) }
__cpuid_patch_end = .;
__sun4v_1insn_patch = .;
.sun4v_1insn_patch : { *(.sun4v_1insn_patch) }
__sun4v_1insn_patch_end = .;
__sun4v_2insn_patch = .;
.sun4v_2insn_patch : { *(.sun4v_2insn_patch) }
__sun4v_2insn_patch_end = .;
.cpuid_patch : {
__cpuid_patch = .;
*(.cpuid_patch)
__cpuid_patch_end = .;
}
.sun4v_1insn_patch : {
__sun4v_1insn_patch = .;
*(.sun4v_1insn_patch)
__sun4v_1insn_patch_end = .;
}
.sun4v_2insn_patch : {
__sun4v_2insn_patch = .;
*(.sun4v_2insn_patch)
__sun4v_2insn_patch_end = .;
}
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(PAGE_SIZE);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(PAGE_SIZE);
.init.ramfs : {
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
}
#endif
PERCPU(PAGE_SIZE)
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
__bss_start = .;
.sbss : { *(.sbss) *(.scommon) }
.bss :
{
*(.dynbss)
*(.bss)
*(COMMON)
}
_end = . ;
PROVIDE (end = .);
/DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
. = ALIGN(PAGE_SIZE);
__init_end = .;
__bss_start = .;
.sbss : {
*(.sbss)
*(.scommon)
}
.bss : {
*(.dynbss)
*(.bss)
*(COMMON)
}
_end = . ;
PROVIDE (end = .);
STABS_DEBUG
/DISCARD/ : {
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
}
DWARF_DEBUG
STABS_DEBUG
DWARF_DEBUG
}
......@@ -491,12 +491,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */
xor %g2, %i4, %g2
xor %g3, %i5, %g3
ldda [%i7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
xor %l0, %g2, %l0
xor %l1, %g3, %l1
stxa %l0, [%i0 + 0x00] %asi
stxa %l1, [%i0 + 0x08] %asi
ldda [%i6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */
xor %i4, %i2, %i4
......@@ -504,12 +504,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */
xor %g2, %i4, %g2
xor %g3, %i5, %g3
ldda [%i7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
xor %l0, %g2, %l0
xor %l1, %g3, %l1
stxa %l0, [%i0 + 0x10] %asi
stxa %l1, [%i0 + 0x18] %asi
ldda [%i6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */
xor %i4, %i2, %i4
......@@ -517,12 +517,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */
xor %g2, %i4, %g2
xor %g3, %i5, %g3
ldda [%i7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
xor %l0, %g2, %l0
xor %l1, %g3, %l1
stxa %l0, [%i0 + 0x20] %asi
stxa %l1, [%i0 + 0x28] %asi
ldda [%i6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */
prefetch [%i1 + 0x40], #one_read
......
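The register swap above is the whole bug fix: on SPARC, %i6 and %i7 of the current window double as the frame pointer and the return address, so the old loads were indexing src2 and src3 through those rather than through %l6 and %l7, the locals that (per the comments) actually hold the source pointers.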
......@@ -631,7 +631,6 @@ void prom_world(int enter)
__asm__ __volatile__("flushw");
}
#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
unsigned long va;
......@@ -655,7 +654,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
"i" (ASI_DCACHE_INVALIDATE));
}
}
#endif /* DCACHE_ALIASING_POSSIBLE */
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
......
......@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/of_device.h>
#include <asm/fbio.h>
......@@ -38,6 +37,7 @@ static void cg6_fillrect(struct fb_info *, const struct fb_fillrect *);
static int cg6_sync(struct fb_info *);
static int cg6_mmap(struct fb_info *, struct vm_area_struct *);
static int cg6_ioctl(struct fb_info *, unsigned int, unsigned long);
static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area);
/*
* Frame buffer operations
......@@ -48,7 +48,7 @@ static struct fb_ops cg6_ops = {
.fb_setcolreg = cg6_setcolreg,
.fb_blank = cg6_blank,
.fb_fillrect = cg6_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_copyarea = cg6_copyarea,
.fb_imageblit = cg6_imageblit,
.fb_sync = cg6_sync,
.fb_mmap = cg6_mmap,
......@@ -65,41 +65,41 @@ static struct fb_ops cg6_ops = {
* The FBC could be the frame buffer control
* The FHC could be the frame buffer hardware control.
*/
#define CG6_ROM_OFFSET 0x0UL
#define CG6_BROOKTREE_OFFSET 0x200000UL
#define CG6_DHC_OFFSET 0x240000UL
#define CG6_ALT_OFFSET 0x280000UL
#define CG6_FHC_OFFSET 0x300000UL
#define CG6_THC_OFFSET 0x301000UL
#define CG6_FBC_OFFSET 0x700000UL
#define CG6_TEC_OFFSET 0x701000UL
#define CG6_RAM_OFFSET 0x800000UL
#define CG6_ROM_OFFSET 0x0UL
#define CG6_BROOKTREE_OFFSET 0x200000UL
#define CG6_DHC_OFFSET 0x240000UL
#define CG6_ALT_OFFSET 0x280000UL
#define CG6_FHC_OFFSET 0x300000UL
#define CG6_THC_OFFSET 0x301000UL
#define CG6_FBC_OFFSET 0x700000UL
#define CG6_TEC_OFFSET 0x701000UL
#define CG6_RAM_OFFSET 0x800000UL
/* FHC definitions */
#define CG6_FHC_FBID_SHIFT 24
#define CG6_FHC_FBID_MASK 255
#define CG6_FHC_REV_SHIFT 20
#define CG6_FHC_REV_MASK 15
#define CG6_FHC_FROP_DISABLE (1 << 19)
#define CG6_FHC_ROW_DISABLE (1 << 18)
#define CG6_FHC_SRC_DISABLE (1 << 17)
#define CG6_FHC_DST_DISABLE (1 << 16)
#define CG6_FHC_RESET (1 << 15)
#define CG6_FHC_LITTLE_ENDIAN (1 << 13)
#define CG6_FHC_RES_MASK (3 << 11)
#define CG6_FHC_1024 (0 << 11)
#define CG6_FHC_1152 (1 << 11)
#define CG6_FHC_1280 (2 << 11)
#define CG6_FHC_1600 (3 << 11)
#define CG6_FHC_CPU_MASK (3 << 9)
#define CG6_FHC_CPU_SPARC (0 << 9)
#define CG6_FHC_CPU_68020 (1 << 9)
#define CG6_FHC_CPU_386 (2 << 9)
#define CG6_FHC_TEST (1 << 8)
#define CG6_FHC_TEST_X_SHIFT 4
#define CG6_FHC_TEST_X_MASK 15
#define CG6_FHC_TEST_Y_SHIFT 0
#define CG6_FHC_TEST_Y_MASK 15
#define CG6_FHC_FBID_SHIFT 24
#define CG6_FHC_FBID_MASK 255
#define CG6_FHC_REV_SHIFT 20
#define CG6_FHC_REV_MASK 15
#define CG6_FHC_FROP_DISABLE (1 << 19)
#define CG6_FHC_ROW_DISABLE (1 << 18)
#define CG6_FHC_SRC_DISABLE (1 << 17)
#define CG6_FHC_DST_DISABLE (1 << 16)
#define CG6_FHC_RESET (1 << 15)
#define CG6_FHC_LITTLE_ENDIAN (1 << 13)
#define CG6_FHC_RES_MASK (3 << 11)
#define CG6_FHC_1024 (0 << 11)
#define CG6_FHC_1152 (1 << 11)
#define CG6_FHC_1280 (2 << 11)
#define CG6_FHC_1600 (3 << 11)
#define CG6_FHC_CPU_MASK (3 << 9)
#define CG6_FHC_CPU_SPARC (0 << 9)
#define CG6_FHC_CPU_68020 (1 << 9)
#define CG6_FHC_CPU_386 (2 << 9)
#define CG6_FHC_TEST (1 << 8)
#define CG6_FHC_TEST_X_SHIFT 4
#define CG6_FHC_TEST_X_MASK 15
#define CG6_FHC_TEST_Y_SHIFT 0
#define CG6_FHC_TEST_Y_MASK 15
/* FBC mode definitions */
#define CG6_FBC_BLIT_IGNORE 0x00000000
......@@ -150,17 +150,17 @@ static struct fb_ops cg6_ops = {
#define CG6_FBC_INDEX_MASK 0x00000030
/* THC definitions */
#define CG6_THC_MISC_REV_SHIFT 16
#define CG6_THC_MISC_REV_MASK 15
#define CG6_THC_MISC_RESET (1 << 12)
#define CG6_THC_MISC_VIDEO (1 << 10)
#define CG6_THC_MISC_SYNC (1 << 9)
#define CG6_THC_MISC_VSYNC (1 << 8)
#define CG6_THC_MISC_SYNC_ENAB (1 << 7)
#define CG6_THC_MISC_CURS_RES (1 << 6)
#define CG6_THC_MISC_INT_ENAB (1 << 5)
#define CG6_THC_MISC_INT (1 << 4)
#define CG6_THC_MISC_INIT 0x9f
#define CG6_THC_MISC_REV_SHIFT 16
#define CG6_THC_MISC_REV_MASK 15
#define CG6_THC_MISC_RESET (1 << 12)
#define CG6_THC_MISC_VIDEO (1 << 10)
#define CG6_THC_MISC_SYNC (1 << 9)
#define CG6_THC_MISC_VSYNC (1 << 8)
#define CG6_THC_MISC_SYNC_ENAB (1 << 7)
#define CG6_THC_MISC_CURS_RES (1 << 6)
#define CG6_THC_MISC_INT_ENAB (1 << 5)
#define CG6_THC_MISC_INT (1 << 4)
#define CG6_THC_MISC_INIT 0x9f
/* The contents are unknown */
struct cg6_tec {
......@@ -170,25 +170,25 @@ struct cg6_tec {
};
struct cg6_thc {
u32 thc_pad0[512];
u32 thc_hs; /* hsync timing */
u32 thc_hsdvs;
u32 thc_hd;
u32 thc_vs; /* vsync timing */
u32 thc_vd;
u32 thc_refresh;
u32 thc_misc;
u32 thc_pad1[56];
u32 thc_cursxy; /* cursor x,y position (16 bits each) */
u32 thc_cursmask[32]; /* cursor mask bits */
u32 thc_cursbits[32]; /* what to show where mask enabled */
u32 thc_pad0[512];
u32 thc_hs; /* hsync timing */
u32 thc_hsdvs;
u32 thc_hd;
u32 thc_vs; /* vsync timing */
u32 thc_vd;
u32 thc_refresh;
u32 thc_misc;
u32 thc_pad1[56];
u32 thc_cursxy; /* cursor x,y position (16 bits each) */
u32 thc_cursmask[32]; /* cursor mask bits */
u32 thc_cursbits[32]; /* what to show where mask enabled */
};
struct cg6_fbc {
u32 xxx0[1];
u32 mode;
u32 clip;
u32 xxx1[1];
u32 xxx1[1];
u32 s;
u32 draw;
u32 blit;
......@@ -243,10 +243,10 @@ struct cg6_fbc {
};
struct bt_regs {
u32 addr;
u32 color_map;
u32 control;
u32 cursor;
u32 addr;
u32 color_map;
u32 control;
u32 cursor;
};
struct cg6_par {
......@@ -267,7 +267,7 @@ struct cg6_par {
static int cg6_sync(struct fb_info *info)
{
struct cg6_par *par = (struct cg6_par *) info->par;
struct cg6_par *par = (struct cg6_par *)info->par;
struct cg6_fbc __iomem *fbc = par->fbc;
int limit = 10000;
......@@ -281,24 +281,24 @@ static int cg6_sync(struct fb_info *info)
}
/**
* cg6_fillrect - REQUIRED function. Can use generic routines if
* non-accelerated hardware and packed pixel based.
* Draws a rectangle on the screen.
* cg6_fillrect - Draws a rectangle on the screen.
*
* @info: frame buffer structure that represents a single frame buffer
* @rect: structure defining the rectangle and operation.
* @info: frame buffer structure that represents a single frame buffer
* @rect: structure defining the rectangle and operation.
*/
static void cg6_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct cg6_par *par = (struct cg6_par *) info->par;
struct cg6_par *par = (struct cg6_par *)info->par;
struct cg6_fbc __iomem *fbc = par->fbc;
unsigned long flags;
s32 val;
/* XXX doesn't handle ROP_XOR */
/* CG6 doesn't handle ROP_XOR */
spin_lock_irqsave(&par->lock, flags);
cg6_sync(info);
sbus_writel(rect->color, &fbc->fg);
sbus_writel(~(u32)0, &fbc->pixelm);
sbus_writel(0xea80ff00, &fbc->alu);
......@@ -316,16 +316,56 @@ static void cg6_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
}
/**
* cg6_imageblit - REQUIRED function. Can use generic routines if
* non-accelerated hardware and packed pixel based.
* Copies an image from system memory to the screen.
* cg6_copyarea - Copies one area of the screen to another area.
*
* @info: frame buffer structure that represents a single frame buffer
* @area: Structure providing the data to copy the framebuffer contents
* from one region to another.
*
* This drawing operation copies a rectangular area from one area of the
* screen to another area.
*/
static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct cg6_par *par = (struct cg6_par *)info->par;
struct cg6_fbc __iomem *fbc = par->fbc;
unsigned long flags;
int i;
spin_lock_irqsave(&par->lock, flags);
cg6_sync(info);
sbus_writel(0xff, &fbc->fg);
sbus_writel(0x00, &fbc->bg);
sbus_writel(~0, &fbc->pixelm);
sbus_writel(0xe880cccc, &fbc->alu);
sbus_writel(0, &fbc->s);
sbus_writel(0, &fbc->clip);
sbus_writel(area->sy, &fbc->y0);
sbus_writel(area->sx, &fbc->x0);
sbus_writel(area->sy + area->height - 1, &fbc->y1);
sbus_writel(area->sx + area->width - 1, &fbc->x1);
sbus_writel(area->dy, &fbc->y2);
sbus_writel(area->dx, &fbc->x2);
sbus_writel(area->dy + area->height - 1, &fbc->y3);
sbus_writel(area->dx + area->width - 1, &fbc->x3);
do {
i = sbus_readl(&fbc->blit);
} while (i < 0 && (i & 0x20000000));
spin_unlock_irqrestore(&par->lock, flags);
}
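A note on the busy-wait that ends cg6_copyarea(): the read of fbc->blit appears to be what actually starts the copy of the rectangle programmed into x0/y0 through x3/y3, and the loop keeps re-reading while the returned status is negative (bit 31 set) with bit 29 also set, which presumably flags a blit still in flight.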
/**
* cg6_imageblit - Copies an image from system memory to the screen.
*
* @info: frame buffer structure that represents a single frame buffer
* @image: structure defining the image.
* @info: frame buffer structure that represents a single frame buffer
* @image: structure defining the image.
*/
static void cg6_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct cg6_par *par = (struct cg6_par *) info->par;
struct cg6_par *par = (struct cg6_par *)info->par;
struct cg6_fbc __iomem *fbc = par->fbc;
const u8 *data = image->data;
unsigned long flags;
......@@ -363,7 +403,7 @@ static void cg6_imageblit(struct fb_info *info, const struct fb_image *image)
sbus_writel(y, &fbc->y0);
sbus_writel(x, &fbc->x0);
sbus_writel(x + 32 - 1, &fbc->x1);
val = ((u32)data[0] << 24) |
((u32)data[1] << 16) |
((u32)data[2] << 8) |
......@@ -404,19 +444,20 @@ static void cg6_imageblit(struct fb_info *info, const struct fb_image *image)
}
/**
* cg6_setcolreg - Optional function. Sets a color register.
* @regno: index of the color register to set
* @red: The red value which can be up to 16 bits wide
* @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
* cg6_setcolreg - Sets a color register.
*
* @regno: index of the color register to set
* @red: The red value which can be up to 16 bits wide
* @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
*/
static int cg6_setcolreg(unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
struct cg6_par *par = (struct cg6_par *) info->par;
struct cg6_par *par = (struct cg6_par *)info->par;
struct bt_regs __iomem *bt = par->bt;
unsigned long flags;
......@@ -440,25 +481,24 @@ static int cg6_setcolreg(unsigned regno,
}
/**
* cg6_blank - Optional function. Blanks the display.
* @blank_mode: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
* cg6_blank - Blanks the display.
*
* @blank_mode: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
cg6_blank(int blank, struct fb_info *info)
static int cg6_blank(int blank, struct fb_info *info)
{
struct cg6_par *par = (struct cg6_par *) info->par;
struct cg6_par *par = (struct cg6_par *)info->par;
struct cg6_thc __iomem *thc = par->thc;
unsigned long flags;
u32 val;
spin_lock_irqsave(&par->lock, flags);
val = sbus_readl(&thc->thc_misc);
switch (blank) {
case FB_BLANK_UNBLANK: /* Unblanking */
val = sbus_readl(&thc->thc_misc);
val |= CG6_THC_MISC_VIDEO;
sbus_writel(val, &thc->thc_misc);
par->flags &= ~CG6_FLAG_BLANKED;
break;
......@@ -466,13 +506,12 @@ cg6_blank(int blank, struct fb_info *info)
case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
case FB_BLANK_POWERDOWN: /* Poweroff */
val = sbus_readl(&thc->thc_misc);
val &= ~CG6_THC_MISC_VIDEO;
sbus_writel(val, &thc->thc_misc);
par->flags |= CG6_FLAG_BLANKED;
break;
}
sbus_writel(val, &thc->thc_misc);
spin_unlock_irqrestore(&par->lock, flags);
return 0;
......@@ -533,7 +572,7 @@ static int cg6_mmap(struct fb_info *info, struct vm_area_struct *vma)
static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
struct cg6_par *par = (struct cg6_par *) info->par;
struct cg6_par *par = (struct cg6_par *)info->par;
return sbusfb_ioctl_helper(cmd, arg, info,
FBTYPE_SUNFAST_COLOR, 8, par->fbsize);
......@@ -543,15 +582,14 @@ static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
* Initialisation
*/
static void
cg6_init_fix(struct fb_info *info, int linebytes)
static void __devinit cg6_init_fix(struct fb_info *info, int linebytes)
{
struct cg6_par *par = (struct cg6_par *)info->par;
const char *cg6_cpu_name, *cg6_card_name;
u32 conf;
conf = sbus_readl(par->fhc);
switch(conf & CG6_FHC_CPU_MASK) {
switch (conf & CG6_FHC_CPU_MASK) {
case CG6_FHC_CPU_SPARC:
cg6_cpu_name = "sparc";
break;
......@@ -563,21 +601,19 @@ cg6_init_fix(struct fb_info *info, int linebytes)
break;
};
if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) {
if (par->fbsize <= 0x100000) {
if (par->fbsize <= 0x100000)
cg6_card_name = "TGX";
} else {
else
cg6_card_name = "TGX+";
}
} else {
if (par->fbsize <= 0x100000) {
if (par->fbsize <= 0x100000)
cg6_card_name = "GX";
} else {
else
cg6_card_name = "GX+";
}
}
sprintf(info->fix.id, "%s %s", cg6_card_name, cg6_cpu_name);
info->fix.id[sizeof(info->fix.id)-1] = 0;
info->fix.id[sizeof(info->fix.id) - 1] = 0;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
......@@ -588,28 +624,28 @@ cg6_init_fix(struct fb_info *info, int linebytes)
}
/* Initialize Brooktree DAC */
static void cg6_bt_init(struct cg6_par *par)
static void __devinit cg6_bt_init(struct cg6_par *par)
{
struct bt_regs __iomem *bt = par->bt;
sbus_writel(0x04 << 24, &bt->addr); /* color planes */
sbus_writel(0x04 << 24, &bt->addr); /* color planes */
sbus_writel(0xff << 24, &bt->control);
sbus_writel(0x05 << 24, &bt->addr);
sbus_writel(0x00 << 24, &bt->control);
sbus_writel(0x06 << 24, &bt->addr); /* overlay plane */
sbus_writel(0x06 << 24, &bt->addr); /* overlay plane */
sbus_writel(0x73 << 24, &bt->control);
sbus_writel(0x07 << 24, &bt->addr);
sbus_writel(0x00 << 24, &bt->control);
}
static void cg6_chip_init(struct fb_info *info)
static void __devinit cg6_chip_init(struct fb_info *info)
{
struct cg6_par *par = (struct cg6_par *) info->par;
struct cg6_par *par = (struct cg6_par *)info->par;
struct cg6_tec __iomem *tec = par->tec;
struct cg6_fbc __iomem *fbc = par->fbc;
u32 rev, conf, mode;
int i;
/* Turn off stuff in the Transform Engine. */
sbus_writel(0, &tec->tec_matrix);
sbus_writel(0, &tec->tec_clip);
......@@ -635,13 +671,13 @@ static void cg6_chip_init(struct fb_info *info)
i = sbus_readl(&fbc->s);
} while (i & 0x10000000);
mode &= ~(CG6_FBC_BLIT_MASK | CG6_FBC_MODE_MASK |
CG6_FBC_DRAW_MASK | CG6_FBC_BWRITE0_MASK |
CG6_FBC_BWRITE1_MASK | CG6_FBC_BREAD_MASK |
CG6_FBC_BDISP_MASK);
CG6_FBC_DRAW_MASK | CG6_FBC_BWRITE0_MASK |
CG6_FBC_BWRITE1_MASK | CG6_FBC_BREAD_MASK |
CG6_FBC_BDISP_MASK);
mode |= (CG6_FBC_BLIT_SRC | CG6_FBC_MODE_COLOR8 |
CG6_FBC_DRAW_RENDER | CG6_FBC_BWRITE0_ENABLE |
CG6_FBC_BWRITE1_DISABLE | CG6_FBC_BREAD_0 |
CG6_FBC_BDISP_0);
CG6_FBC_DRAW_RENDER | CG6_FBC_BWRITE0_ENABLE |
CG6_FBC_BWRITE1_DISABLE | CG6_FBC_BREAD_0 |
CG6_FBC_BDISP_0);
sbus_writel(mode, &fbc->mode);
sbus_writel(0, &fbc->clip);
......@@ -671,7 +707,8 @@ static void cg6_unmap_regs(struct of_device *op, struct fb_info *info,
of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
}
static int __devinit cg6_probe(struct of_device *op, const struct of_device_id *match)
static int __devinit cg6_probe(struct of_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->node;
struct fb_info *info;
......@@ -705,22 +742,23 @@ static int __devinit cg6_probe(struct of_device *op, const struct of_device_id *
par->fbsize *= 4;
par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET,
4096, "cgsix fbc");
4096, "cgsix fbc");
par->tec = of_ioremap(&op->resource[0], CG6_TEC_OFFSET,
sizeof(struct cg6_tec), "cgsix tec");
sizeof(struct cg6_tec), "cgsix tec");
par->thc = of_ioremap(&op->resource[0], CG6_THC_OFFSET,
sizeof(struct cg6_thc), "cgsix thc");
sizeof(struct cg6_thc), "cgsix thc");
par->bt = of_ioremap(&op->resource[0], CG6_BROOKTREE_OFFSET,
sizeof(struct bt_regs), "cgsix dac");
sizeof(struct bt_regs), "cgsix dac");
par->fhc = of_ioremap(&op->resource[0], CG6_FHC_OFFSET,
sizeof(u32), "cgsix fhc");
sizeof(u32), "cgsix fhc");
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
FBINFO_READS_FAST;
info->fbops = &cg6_ops;
info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET,
par->fbsize, "cgsix ram");
info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET,
par->fbsize, "cgsix ram");
if (!par->fbc || !par->tec || !par->thc ||
!par->bt || !par->fhc || !info->screen_base)
goto out_unmap_regs;
......
......@@ -171,17 +171,17 @@ static struct fb_ops ffb_ops = {
#define FFB_PPC_CS_VAR 0x000002
#define FFB_PPC_CS_CONST 0x000003
#define FFB_ROP_NEW 0x83
#define FFB_ROP_OLD 0x85
#define FFB_ROP_NEW_XOR_OLD 0x86
#define FFB_UCSR_FIFO_MASK 0x00000fff
#define FFB_UCSR_FB_BUSY 0x01000000
#define FFB_UCSR_RP_BUSY 0x02000000
#define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY)
#define FFB_UCSR_READ_ERR 0x40000000
#define FFB_UCSR_FIFO_OVFL 0x80000000
#define FFB_UCSR_ALL_ERRORS (FFB_UCSR_READ_ERR|FFB_UCSR_FIFO_OVFL)
#define FFB_ROP_NEW 0x83
#define FFB_ROP_OLD 0x85
#define FFB_ROP_NEW_XOR_OLD 0x86
#define FFB_UCSR_FIFO_MASK 0x00000fff
#define FFB_UCSR_FB_BUSY 0x01000000
#define FFB_UCSR_RP_BUSY 0x02000000
#define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY)
#define FFB_UCSR_READ_ERR 0x40000000
#define FFB_UCSR_FIFO_OVFL 0x80000000
#define FFB_UCSR_ALL_ERRORS (FFB_UCSR_READ_ERR|FFB_UCSR_FIFO_OVFL)
struct ffb_fbc {
/* Next vertex registers */
......@@ -197,7 +197,7 @@ struct ffb_fbc {
u32 ryf;
u32 rxf;
u32 xxx3[2];
u32 dmyf;
u32 dmxf;
u32 xxx4[2];
......@@ -211,13 +211,13 @@ struct ffb_fbc {
u32 bh;
u32 bw;
u32 xxx6[2];
u32 xxx7[32];
/* Setup unit vertex state register */
u32 suvtx;
u32 xxx8[63];
/* Control registers */
u32 ppc;
u32 wid;
......@@ -235,7 +235,7 @@ struct ffb_fbc {
u32 dcsb;
u32 dczf;
u32 dczb;
u32 xxx9;
u32 blendc;
u32 blendc1;
......@@ -252,7 +252,7 @@ struct ffb_fbc {
u32 fbcfg1;
u32 fbcfg2;
u32 fbcfg3;
u32 ppcfg;
u32 pick;
u32 fillmode;
......@@ -269,7 +269,7 @@ struct ffb_fbc {
u32 clip2max;
u32 clip3min;
u32 clip3max;
/* New 3dRAM III support regs */
u32 rawblend2;
u32 rawpreblend;
......@@ -287,7 +287,7 @@ struct ffb_fbc {
u32 rawcmp;
u32 rawwac;
u32 fbramid;
u32 drawop;
u32 xxx10[2];
u32 fontlpat;
......@@ -302,7 +302,7 @@ struct ffb_fbc {
u32 stencil;
u32 stencilctl;
u32 xxx13[4];
u32 xxx13[4];
u32 dcss1;
u32 dcss2;
u32 dcss3;
......@@ -315,17 +315,17 @@ struct ffb_fbc {
u32 dcd3;
u32 dcd4;
u32 xxx15;
u32 pattern[32];
u32 xxx16[256];
u32 devid;
u32 xxx17[63];
u32 ucsr;
u32 xxx18[31];
u32 mer;
};
......@@ -336,20 +336,20 @@ struct ffb_dac {
u32 value2;
};
#define FFB_DAC_UCTRL 0x1001 /* User Control */
#define FFB_DAC_UCTRL_MANREV 0x00000f00 /* 4-bit Manufacturing Revision */
#define FFB_DAC_UCTRL_MANREV_SHIFT 8
#define FFB_DAC_TGEN 0x6000 /* Timing Generator */
#define FFB_DAC_TGEN_VIDE 0x00000001 /* Video Enable */
#define FFB_DAC_DID 0x8000 /* Device Identification */
#define FFB_DAC_DID_PNUM 0x0ffff000 /* Device Part Number */
#define FFB_DAC_DID_PNUM_SHIFT 12
#define FFB_DAC_DID_REV 0xf0000000 /* Device Revision */
#define FFB_DAC_DID_REV_SHIFT 28
#define FFB_DAC_UCTRL 0x1001 /* User Control */
#define FFB_DAC_UCTRL_MANREV 0x00000f00 /* 4-bit Manufacturing Revision */
#define FFB_DAC_UCTRL_MANREV_SHIFT 8
#define FFB_DAC_TGEN 0x6000 /* Timing Generator */
#define FFB_DAC_TGEN_VIDE 0x00000001 /* Video Enable */
#define FFB_DAC_DID 0x8000 /* Device Identification */
#define FFB_DAC_DID_PNUM 0x0ffff000 /* Device Part Number */
#define FFB_DAC_DID_PNUM_SHIFT 12
#define FFB_DAC_DID_REV 0xf0000000 /* Device Revision */
#define FFB_DAC_DID_REV_SHIFT 28
#define FFB_DAC_CUR_CTRL 0x100
#define FFB_DAC_CUR_CTRL_P0 0x00000001
#define FFB_DAC_CUR_CTRL_P1 0x00000002
#define FFB_DAC_CUR_CTRL_P0 0x00000001
#define FFB_DAC_CUR_CTRL_P1 0x00000002
struct ffb_par {
spinlock_t lock;
......@@ -382,7 +382,9 @@ static void FFBFifo(struct ffb_par *par, int n)
if (cache - n < 0) {
fbc = par->fbc;
do { cache = (upa_readl(&fbc->ucsr) & FFB_UCSR_FIFO_MASK) - 8;
do {
cache = (upa_readl(&fbc->ucsr) & FFB_UCSR_FIFO_MASK);
cache -= 8;
} while (cache - n < 0);
}
par->fifo_cache = cache - n;
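The restructured FFBFifo() makes the credit-caching scheme easier to see: par->fifo_cache holds a running estimate of free FIFO slots, decremented as words are queued, and only when a request for n slots would underflow does the driver spin re-reading the UCSR fifo field, knocking 8 off each sample (presumably headroom for slots the chip has not yet retired) until the request fits.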
......@@ -401,12 +403,12 @@ static void FFBWait(struct ffb_par *par)
upa_writel(FFB_UCSR_ALL_ERRORS, &fbc->ucsr);
}
udelay(10);
} while(--limit > 0);
} while (--limit > 0);
}
static int ffb_sync(struct fb_info *p)
{
struct ffb_par *par = (struct ffb_par *) p->par;
struct ffb_par *par = (struct ffb_par *)p->par;
FFBWait(par);
return 0;
......@@ -431,8 +433,8 @@ static void ffb_switch_from_graph(struct ffb_par *par)
FFBWait(par);
par->fifo_cache = 0;
FFBFifo(par, 7);
upa_writel(FFB_PPC_VCE_DISABLE|FFB_PPC_TBE_OPAQUE|
FFB_PPC_APE_DISABLE|FFB_PPC_CS_CONST,
upa_writel(FFB_PPC_VCE_DISABLE | FFB_PPC_TBE_OPAQUE |
FFB_PPC_APE_DISABLE | FFB_PPC_CS_CONST,
&fbc->ppc);
upa_writel(0x2000707f, &fbc->fbc);
upa_writel(par->rop_cache, &fbc->rop);
......@@ -455,7 +457,7 @@ static void ffb_switch_from_graph(struct ffb_par *par)
static int ffb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct ffb_par *par = (struct ffb_par *) info->par;
struct ffb_par *par = (struct ffb_par *)info->par;
/* We just use this to catch switches out of
* graphics mode.
......@@ -468,16 +470,14 @@ static int ffb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
}
/**
* ffb_fillrect - REQUIRED function. Can use generic routines if
* non-accelerated hardware and packed pixel based.
* Draws a rectangle on the screen.
* ffb_fillrect - Draws a rectangle on the screen.
*
* @info: frame buffer structure that represents a single frame buffer
* @rect: structure defining the rectangle and operation.
* @info: frame buffer structure that represents a single frame buffer
* @rect: structure defining the rectangle and operation.
*/
static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct ffb_par *par = (struct ffb_par *) info->par;
struct ffb_par *par = (struct ffb_par *)info->par;
struct ffb_fbc __iomem *fbc = par->fbc;
unsigned long flags;
u32 fg;
......@@ -494,9 +494,9 @@ static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
par->fg_cache = fg;
}
ffb_rop(par, (rect->rop == ROP_COPY ?
FFB_ROP_NEW :
FFB_ROP_NEW_XOR_OLD));
ffb_rop(par, rect->rop == ROP_COPY ?
FFB_ROP_NEW :
FFB_ROP_NEW_XOR_OLD);
FFBFifo(par, 5);
upa_writel(FFB_DRAWOP_RECTANGLE, &fbc->drawop);
......@@ -509,18 +509,15 @@ static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
}
/**
* ffb_copyarea - REQUIRED function. Can use generic routines if
* non-accelerated hardware and packed pixel based.
* Copies one area of the screen to another area.
* ffb_copyarea - Copies one area of the screen to another area.
*
* @info: frame buffer structure that represents a single frame buffer
* @area: structure defining the source and destination.
* @info: frame buffer structure that represents a single frame buffer
* @area: structure defining the source and destination.
*/
static void
ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct ffb_par *par = (struct ffb_par *) info->par;
struct ffb_par *par = (struct ffb_par *)info->par;
struct ffb_fbc __iomem *fbc = par->fbc;
unsigned long flags;
......@@ -547,16 +544,14 @@ ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
}
/**
* ffb_imageblit - REQUIRED function. Can use generic routines if
* non-accelerated hardware and packed pixel based.
* Copies an image from system memory to the screen.
* ffb_imageblit - Copies an image from system memory to the screen.
*
* @info: frame buffer structure that represents a single frame buffer
* @image: structure defining the image.
* @info: frame buffer structure that represents a single frame buffer
* @image: structure defining the image.
*/
static void ffb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct ffb_par *par = (struct ffb_par *) info->par;
struct ffb_par *par = (struct ffb_par *)info->par;
struct ffb_fbc __iomem *fbc = par->fbc;
const u8 *data = image->data;
unsigned long flags;
......@@ -644,13 +639,14 @@ static void ffb_fixup_var_rgb(struct fb_var_screeninfo *var)
}
/**
* ffb_setcolreg - Optional function. Sets a color register.
* @regno: index of the color register to set
* @red: The red value which can be up to 16 bits wide
* @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
* ffb_setcolreg - Sets a color register.
*
* @regno: index of the color register to set
* @red: The red value which can be up to 16 bits wide
* @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
*/
static int ffb_setcolreg(unsigned regno,
unsigned red, unsigned green, unsigned blue,
......@@ -672,14 +668,13 @@ static int ffb_setcolreg(unsigned regno,
}
/**
* ffb_blank - Optional function. Blanks the display.
* @blank_mode: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
* ffb_blank - Optional function. Blanks the display.
* @blank_mode: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
ffb_blank(int blank, struct fb_info *info)
static int ffb_blank(int blank, struct fb_info *info)
{
struct ffb_par *par = (struct ffb_par *) info->par;
struct ffb_par *par = (struct ffb_par *)info->par;
struct ffb_dac __iomem *dac = par->dac;
unsigned long flags;
u32 val;
......@@ -867,7 +862,7 @@ static int ffb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static int ffb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
struct ffb_par *par = (struct ffb_par *) info->par;
struct ffb_par *par = (struct ffb_par *)info->par;
return sbusfb_ioctl_helper(cmd, arg, info,
FBTYPE_CREATOR, 24, par->fbsize);
......@@ -877,8 +872,7 @@ static int ffb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
* Initialisation
*/
static void
ffb_init_fix(struct fb_info *info)
static void ffb_init_fix(struct fb_info *info)
{
struct ffb_par *par = (struct ffb_par *)info->par;
const char *ffb_type_name;
......@@ -902,7 +896,8 @@ ffb_init_fix(struct fb_info *info)
info->fix.accel = FB_ACCEL_SUN_CREATOR;
}
static int __devinit ffb_probe(struct of_device *op, const struct of_device_id *match)
static int __devinit ffb_probe(struct of_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->node;
struct ffb_fbc __iomem *fbc;
......
/*
* include/asm-sparc/irqflags.h
*
* IRQ flags handling
*
* This file gets included from lowlevel asm headers too, to provide
* wrapped versions of the local_irq_*() APIs, based on the
* raw_local_irq_*() functions from the lowlevel headers.
*/
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#ifndef __ASSEMBLY__
extern void raw_local_irq_restore(unsigned long);
extern unsigned long __raw_local_irq_save(void);
extern void raw_local_irq_enable(void);
static inline unsigned long getipl(void)
{
unsigned long retval;
__asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
return retval;
}
#define raw_local_save_flags(flags) ((flags) = getipl())
#define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save())
#define raw_local_irq_disable() ((void) __raw_local_irq_save())
#define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0)
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return ((flags & PSR_PIL) != 0);
}
#endif /* (__ASSEMBLY__) */
#endif /* !(_ASM_IRQFLAGS_H) */
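Usage of the renamed primitives is unchanged in shape; a minimal sketch of a critical section built on them (the generic linux/irqflags.h is what layers the familiar local_irq_* names on top):
	unsigned long flags;

	raw_local_irq_save(flags);	/* disable via PSR_PIL, keeping the old state */
	/* ... touch per-cpu state safely ... */
	raw_local_irq_restore(flags);	/* put the saved PIL level back */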
......@@ -15,6 +15,8 @@
#ifndef __ASSEMBLY__
#include <linux/irqflags.h>
/*
* Sparc (general) CPU types
*/
......@@ -164,26 +166,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
"o0", "o1", "o2", "o3", "o7"); \
} while(0)
/*
* Changing the IRQ level on the Sparc.
*/
extern void local_irq_restore(unsigned long);
extern unsigned long __local_irq_save(void);
extern void local_irq_enable(void);
static inline unsigned long getipl(void)
{
unsigned long retval;
__asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
return retval;
}
#define local_save_flags(flags) ((flags) = getipl())
#define local_irq_save(flags) ((flags) = __local_irq_save())
#define local_irq_disable() ((void) __local_irq_save())
#define irqs_disabled() ((getipl() & PSR_PIL) != 0)
/* XXX Change this if we ever use a PSO mode kernel. */
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
......
......@@ -75,12 +75,11 @@ struct trap_per_cpu {
unsigned long tsb_huge_temp;
/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
unsigned int irq_worklist;
unsigned long irq_worklist_pa;
unsigned int cpu_mondo_qmask;
unsigned int dev_mondo_qmask;
unsigned int resum_qmask;
unsigned int nonresum_qmask;
unsigned int __pad2[1];
void *hdesc;
} __attribute__((aligned(64)));
extern struct trap_per_cpu trap_block[NR_CPUS];
......@@ -128,11 +127,11 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
#define TRAP_PER_CPU_TSB_HUGE 0xd0
#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
#define TRAP_PER_CPU_IRQ_WORKLIST 0xe0
#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe4
#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xe8
#define TRAP_PER_CPU_RESUM_QMASK 0xec
#define TRAP_PER_CPU_NONRESUM_QMASK 0xf0
#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
#define TRAP_PER_CPU_RESUM_QMASK 0xf0
#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
#define TRAP_BLOCK_SZ_SHIFT 8
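Spelling out the renumbering above: irq_worklist_pa is an 8-byte unsigned long sitting at 0xe0 where the old 4-byte irq_worklist was, so each qmask word slides 4 bytes later (0xe4/0xe8/0xec/0xf0 become 0xe8/0xec/0xf0/0xf4); the dropped __pad2 word absorbs the growth, so TRAP_BLOCK_SZ_SHIFT stays 8 and each trap block remains 256 bytes, the power-of-2 size the struct comment requires.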
......@@ -184,9 +183,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
/* Clobbers TMP, loads DEST with current thread info pointer. */
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
......@@ -223,9 +222,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
......
......@@ -51,10 +51,19 @@ extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end);
extern void sun4v_destroy_msi(unsigned int virt_irq);
extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end,
unsigned long imap_base,
unsigned long iclr_base);
extern void sun4u_destroy_msi(unsigned int virt_irq);
extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
extern void sparc64_set_msi(unsigned int virt_irq, u32 msi);
extern u32 sparc64_get_msi(unsigned int virt_irq);
extern unsigned char virt_irq_alloc(unsigned int dev_handle,
unsigned int dev_ino);
#ifdef CONFIG_PCI_MSI
extern void virt_irq_free(unsigned int virt_irq);
#endif
extern void fixup_irqs(void);
......