Commit 85da1fb5 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (53 commits)
  serial: Add driver for the Cell Network Processor serial port NWP device
  powerpc: enable dynamic ftrace
  powerpc/cell: Fix the prototype of create_vma_map()
  powerpc/mm: Make clear_fixmap() actually work
  powerpc/kdump: Use ppc_save_regs() in crash_setup_regs()
  powerpc: Export cacheable_memzero as its now used in a driver
  powerpc: Fix missing semicolons in mmu_decl.h
  powerpc/pasemi: local_irq_save uses an unsigned long
  powerpc/cell: Fix some u64 vs. long types
  powerpc/cell: Use correct types in beat files
  powerpc: Use correct type in prom_init.c
  powerpc: Remove unnecessary casts
  mtd/ps3vram: Use _PAGE_NO_CACHE in memory ioremap
  mtd/ps3vram: Use msleep in waits
  mtd/ps3vram: Use proper kernel types
  mtd/ps3vram: Cleanup ps3vram driver messages
  mtd/ps3vram: Remove ps3vram debug routines
  mtd/ps3vram: Add modalias support to the ps3vram driver
  mtd/ps3vram: Add ps3vram driver for accessing video RAM as MTD
  powerpc: Fix iseries drivers build failure without CONFIG_VIOPATH
  ...
parents 73ac36ea 5886188d
......@@ -18,7 +18,7 @@ This is the memory-mapped registers for on board FPGA.
Required properties:
- compatible : should be "fsl,fpga-pixis".
- reg : should contain the address and the lenght of the FPPGA register
- reg : should contain the address and the length of the FPGA register
set.
Example (MPC8610HPCD):
......@@ -27,3 +27,33 @@ Example (MPC8610HPCD):
compatible = "fsl,fpga-pixis";
reg = <0xe8000000 32>;
};
* Freescale BCSR GPIO banks
Some BCSR registers act as simple GPIO controllers, each such
register can be represented by the gpio-controller node.
Required properties:
- compatible : Should be "fsl,<board>-bcsr-gpio".
- reg : Should contain the address and the length of the GPIO bank
register.
- #gpio-cells : Should be two. The first cell is the pin number and the
  second cell is used to specify optional parameters (currently unused).
- gpio-controller : Marks the port as GPIO controller.
Example:
bcsr@1,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,mpc8360mds-bcsr";
reg = <1 0 0x8000>;
ranges = <0 1 0 0x8000>;
bcsr13: gpio-controller@d {
#gpio-cells = <2>;
compatible = "fsl,mpc8360mds-bcsr-gpio";
reg = <0xd 1>;
gpio-controller;
};
};
......@@ -3489,6 +3489,12 @@ L: linuxppc-dev@ozlabs.org
L: cbe-oss-dev@ozlabs.org
S: Supported
PS3VRAM DRIVER
P: Jim Paris
M: jim@jtan.com
L: cbe-oss-dev@ozlabs.org
S: Maintained
PVRUSB2 VIDEO4LINUX DRIVER
P: Mike Isely
M: isely@pobox.com
......
......@@ -108,6 +108,8 @@ config ARCH_NO_VIRT_TO_BUS
config PPC
bool
default y
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_IDE
......@@ -326,7 +328,8 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on (PPC64 && RELOCATABLE) || 6xx
depends on PPC64 || 6xx
select RELOCATABLE if PPC64
help
Build a kernel suitable for use as a kdump capture kernel.
The same kernel binary can be used as production kernel and dump
......
......@@ -356,7 +356,7 @@ $(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y))
@rm -f $@; ln $< $@
install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $<
sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
# anything not in $(targets)
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
......
......@@ -69,8 +69,18 @@ flash@0,0 {
};
bcsr@1,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,mpc8360mds-bcsr";
reg = <1 0 0x8000>;
ranges = <0 1 0 0x8000>;
bcsr13: gpio-controller@d {
#gpio-cells = <2>;
compatible = "fsl,mpc8360mds-bcsr-gpio";
reg = <0xd 1>;
gpio-controller;
};
};
};
......@@ -195,10 +205,21 @@ ipic: pic@700 {
};
par_io@1400 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0x1400 0x100>;
ranges = <0 0x1400 0x100>;
device_type = "par_io";
num-ports = <7>;
qe_pio_b: gpio-controller@18 {
#gpio-cells = <2>;
compatible = "fsl,mpc8360-qe-pario-bank",
"fsl,mpc8323-qe-pario-bank";
reg = <0x18 0x18>;
gpio-controller;
};
pio1: ucc_pin@01 {
pio-map = <
/* port pin dir open_drain assignment has_irq */
......@@ -282,6 +303,15 @@ data-only@0 {
};
};
timer@440 {
compatible = "fsl,mpc8360-qe-gtm",
"fsl,qe-gtm", "fsl,gtm";
reg = <0x440 0x40>;
clock-frequency = <132000000>;
interrupts = <12 13 14 15>;
interrupt-parent = <&qeic>;
};
spi@4c0 {
cell-index = <0>;
compatible = "fsl,spi";
......@@ -301,11 +331,20 @@ spi@500 {
};
usb@6c0 {
compatible = "qe_udc";
compatible = "fsl,mpc8360-qe-usb",
"fsl,mpc8323-qe-usb";
reg = <0x6c0 0x40 0x8b00 0x100>;
interrupts = <11>;
interrupt-parent = <&qeic>;
mode = "slave";
fsl,fullspeed-clock = "clk21";
fsl,lowspeed-clock = "brg9";
gpios = <&qe_pio_b 2 0 /* USBOE */
&qe_pio_b 3 0 /* USBTP */
&qe_pio_b 8 0 /* USBTN */
&qe_pio_b 9 0 /* USBRP */
&qe_pio_b 11 0 /* USBRN */
&bcsr13 5 0 /* SPEED */
&bcsr13 4 1>; /* POWER */
};
enet0: ucc@2000 {
......
......@@ -218,8 +218,23 @@ timer@440 {
reg = <0x440 0x40>;
interrupts = <12 13 14 15>;
interrupt-parent = <&qeic>;
/* filled by u-boot */
clock-frequency = <0>;
clock-frequency = <166666666>;
};
usb@6c0 {
compatible = "fsl,mpc8360-qe-usb",
"fsl,mpc8323-qe-usb";
reg = <0x6c0 0x40 0x8b00 0x100>;
interrupts = <11>;
interrupt-parent = <&qeic>;
fsl,fullspeed-clock = "clk21";
gpios = <&qe_pio_b 2 0 /* USBOE */
&qe_pio_b 3 0 /* USBTP */
&qe_pio_b 8 0 /* USBTN */
&qe_pio_b 9 0 /* USBRP */
&qe_pio_b 11 0 /* USBRN */
&qe_pio_e 20 0 /* SPEED */
&qe_pio_e 21 1 /* POWER */>;
};
spi@4c0 {
......
......@@ -26,7 +26,13 @@ aliases {
serial1 = &serial1;
pci0 = &pci0;
pci1 = &pci1;
rapidio0 = &rapidio0;
/*
* Only one of Rapid IO or PCI can be present due to HW limitations and
* due to the fact that the 2 now share address space in the new memory
* map. The most likely case is that we have PCI, so comment out the
* rapidio node. Leave it here for reference.
*/
/* rapidio0 = &rapidio0; */
};
cpus {
......@@ -62,18 +68,17 @@ memory {
reg = <0x00000000 0x40000000>; // 1G at 0x0
};
localbus@f8005000 {
localbus@ffe05000 {
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,mpc8641-localbus", "simple-bus";
reg = <0xf8005000 0x1000>;
reg = <0xffe05000 0x1000>;
interrupts = <19 2>;
interrupt-parent = <&mpic>;
ranges = <0 0 0xff800000 0x00800000
1 0 0xfe000000 0x01000000
2 0 0xf8200000 0x00100000
3 0 0xf8100000 0x00100000>;
ranges = <0 0 0xef800000 0x00800000
2 0 0xffdf8000 0x00008000
3 0 0xffdf0000 0x00008000>;
flash@0,0 {
compatible = "cfi-flash";
......@@ -103,13 +108,13 @@ partition@700000 {
};
};
soc8641@f8000000 {
soc8641@ffe00000 {
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
compatible = "simple-bus";
ranges = <0x00000000 0xf8000000 0x00100000>;
reg = <0xf8000000 0x00001000>; // CCSRBAR
ranges = <0x00000000 0xffe00000 0x00100000>;
reg = <0xffe00000 0x00001000>; // CCSRBAR
bus-frequency = <0>;
i2c@3000 {
......@@ -340,17 +345,17 @@ global-utilities@e0000 {
};
};
pci0: pcie@f8008000 {
pci0: pcie@ffe08000 {
cell-index = <0>;
compatible = "fsl,mpc8641-pcie";
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <0xf8008000 0x1000>;
reg = <0xffe08000 0x1000>;
bus-range = <0x0 0xff>;
ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
0x01000000 0x0 0x00000000 0xffc00000 0x0 0x00010000>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <24 2>;
......@@ -481,7 +486,7 @@ pcie@0 {
0x01000000 0x0 0x00000000
0x01000000 0x0 0x00000000
0x0 0x00100000>;
0x0 0x00010000>;
uli1575@0 {
reg = <0 0 0 0 0>;
#size-cells = <2>;
......@@ -491,7 +496,7 @@ uli1575@0 {
0x0 0x20000000
0x01000000 0x0 0x00000000
0x01000000 0x0 0x00000000
0x0 0x00100000>;
0x0 0x00010000>;
isa@1e {
device_type = "isa";
#interrupt-cells = <2>;
......@@ -549,17 +554,17 @@ gpio@400 {
};
pci1: pcie@f8009000 {
pci1: pcie@ffe09000 {
cell-index = <1>;
compatible = "fsl,mpc8641-pcie";
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <0xf8009000 0x1000>;
reg = <0xffe09000 0x1000>;
bus-range = <0 0xff>;
ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
0x01000000 0x0 0x00000000 0xffc10000 0x0 0x00010000>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <25 2>;
......@@ -582,18 +587,21 @@ pcie@0 {
0x01000000 0x0 0x00000000
0x01000000 0x0 0x00000000
0x0 0x00100000>;
0x0 0x00010000>;
};
};
rapidio0: rapidio@f80c0000 {
/*
rapidio0: rapidio@ffec0000 {
#address-cells = <2>;
#size-cells = <2>;
compatible = "fsl,rapidio-delta";
reg = <0xf80c0000 0x20000>;
ranges = <0 0 0xc0000000 0 0x20000000>;
reg = <0xffec0000 0x20000>;
ranges = <0 0 0x80000000 0 0x20000000>;
interrupt-parent = <&mpic>;
/* err_irq bell_outb_irq bell_inb_irq
msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */
// err_irq bell_outb_irq bell_inb_irq
// msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq
interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>;
};
*/
};
......@@ -15,7 +15,7 @@
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
# $5 - kernel boot file, the zImage
# $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
#
# User may have a custom install script
......@@ -38,3 +38,15 @@ fi
cat $2 > $4/$image_name
cp $3 $4/System.map
# Copy all the bootable image files
path=$4
shift 4
while [ $# -ne 0 ]; do
image_name=`basename $1`
if [ -f $path/$image_name ]; then
mv $path/$image_name $path/$image_name.old
fi
cat $1 > $path/$image_name
shift
done;
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.28-rc3
# Sat Nov 8 12:40:13 2008
# Linux kernel version: 2.6.28-rc8
# Tue Dec 30 11:17:46 2008
#
# CONFIG_PPC64 is not set
......@@ -21,7 +21,10 @@ CONFIG_FSL_BOOKE=y
CONFIG_FSL_EMB_PERFMON=y
# CONFIG_PHYS_64BIT is not set
CONFIG_SPE=y
CONFIG_PPC_MMU_NOHASH=y
# CONFIG_PPC_MM_SLICES is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PPC32=y
CONFIG_WORD_SIZE=32
# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
......@@ -50,7 +53,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_PPC_OF=y
CONFIG_OF=y
CONFIG_PPC_UDBG_16550=y
# CONFIG_GENERIC_TBSYNC is not set
CONFIG_GENERIC_TBSYNC=y
CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFAULT_UIMAGE=y
......@@ -62,7 +65,7 @@ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
......@@ -126,6 +129,7 @@ CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
......@@ -138,6 +142,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_LBD=y
# CONFIG_BLK_DEV_IO_TRACE is not set
......@@ -197,6 +202,7 @@ CONFIG_PPC_I8259=y
# CONFIG_CPM2 is not set
CONFIG_FSL_ULI1575=y
# CONFIG_MPC8xxx_GPIO is not set
# CONFIG_SIMPLE_GPIO is not set
#
# Kernel options
......@@ -224,6 +230,7 @@ CONFIG_MATH_EMULATION=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_IRQ_ALL_CPUS is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_SELECT_MEMORY_MODEL=y
......@@ -241,6 +248,9 @@ CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_PPC_4K_PAGES=y
# CONFIG_PPC_16K_PAGES is not set
# CONFIG_PPC_64K_PAGES is not set
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_PROC_DEVICETREE=y
# CONFIG_CMDLINE_BOOL is not set
......@@ -443,8 +453,10 @@ CONFIG_MISC_DEVICES=y
# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_C2PORT is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
......@@ -784,6 +796,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_HVC_UDBG is not set
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
......@@ -869,11 +882,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
......@@ -886,14 +899,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
#
# Voltage and Current regulators
#
# CONFIG_REGULATOR is not set
# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
#
# Multimedia devices
......@@ -1252,11 +1258,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_TMC is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
# may also be needed; see USB_STORAGE Help for more information
# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
......@@ -1348,6 +1354,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
#
# SPI RTC drivers
......@@ -1624,6 +1631,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_PAGEALLOC is not set
......@@ -1649,11 +1657,16 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_RNG=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_CRYPTD is not set
......
......@@ -89,6 +89,8 @@
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TIOCGRS485 0x542e
#define TIOCSRS485 0x542f
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
......
......@@ -48,63 +48,8 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
#ifdef __powerpc64__
else {
/* FIXME Merge this with xmon_save_regs ?? */
unsigned long tmp1, tmp2;
__asm__ __volatile__ (
"std 0,0(%2)\n"
"std 1,8(%2)\n"
"std 2,16(%2)\n"
"std 3,24(%2)\n"
"std 4,32(%2)\n"
"std 5,40(%2)\n"
"std 6,48(%2)\n"
"std 7,56(%2)\n"
"std 8,64(%2)\n"
"std 9,72(%2)\n"
"std 10,80(%2)\n"
"std 11,88(%2)\n"
"std 12,96(%2)\n"
"std 13,104(%2)\n"
"std 14,112(%2)\n"
"std 15,120(%2)\n"
"std 16,128(%2)\n"
"std 17,136(%2)\n"
"std 18,144(%2)\n"
"std 19,152(%2)\n"
"std 20,160(%2)\n"
"std 21,168(%2)\n"
"std 22,176(%2)\n"
"std 23,184(%2)\n"
"std 24,192(%2)\n"
"std 25,200(%2)\n"
"std 26,208(%2)\n"
"std 27,216(%2)\n"
"std 28,224(%2)\n"
"std 29,232(%2)\n"
"std 30,240(%2)\n"
"std 31,248(%2)\n"
"mfmsr %0\n"
"std %0, 264(%2)\n"
"mfctr %0\n"
"std %0, 280(%2)\n"
"mflr %0\n"
"std %0, 288(%2)\n"
"bl 1f\n"
"1: mflr %1\n"
"std %1, 256(%2)\n"
"mtlr %0\n"
"mfxer %0\n"
"std %0, 296(%2)\n"
: "=&r" (tmp1), "=&r" (tmp2)
: "b" (newregs)
: "memory");
}
#else
else
ppc_save_regs(newregs);
#endif /* __powerpc64__ */
}
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
......
......@@ -320,6 +320,7 @@ enum ps3_match_id {
enum ps3_match_sub_id {
PS3_MATCH_SUB_ID_GPU_FB = 1,
PS3_MATCH_SUB_ID_GPU_RAMDISK = 2,
};
#define PS3_MODULE_ALIAS_EHCI "ps3:1:0"
......@@ -332,6 +333,7 @@ enum ps3_match_sub_id {
#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0"
#define PS3_MODULE_ALIAS_SOUND "ps3:9:0"
#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1"
#define PS3_MODULE_ALIAS_GPU_RAMDISK "ps3:10:2"
#define PS3_MODULE_ALIAS_LPM "ps3:11:0"
enum ps3_system_bus_device_type {
......
......@@ -17,6 +17,8 @@
#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cpm.h>
#include <asm/immap_qe.h>
......@@ -84,7 +86,11 @@ static inline bool qe_clock_is_brg(enum qe_clock clk)
extern spinlock_t cmxgcr_lock;
/* Export QE common operations */
#ifdef CONFIG_QUICC_ENGINE
extern void __init qe_reset(void);
#else
static inline void qe_reset(void) {}
#endif
/* QE PIO */
#define QE_PIO_PINS 32
......@@ -101,16 +107,43 @@ struct qe_pio_regs {
#endif
};
extern int par_io_init(struct device_node *np);
extern int par_io_of_config(struct device_node *np);
#define QE_PIO_DIR_IN 2
#define QE_PIO_DIR_OUT 1
extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin,
int dir, int open_drain, int assignment,
int has_irq);
#ifdef CONFIG_QUICC_ENGINE
extern int par_io_init(struct device_node *np);
extern int par_io_of_config(struct device_node *np);
extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
int assignment, int has_irq);
extern int par_io_data_set(u8 port, u8 pin, u8 val);
#else
static inline int par_io_init(struct device_node *np) { return -ENOSYS; }
static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; }
static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
int assignment, int has_irq) { return -ENOSYS; }
static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
#endif /* CONFIG_QUICC_ENGINE */
/*
* Pin multiplexing functions.
*/
struct qe_pin;
#ifdef CONFIG_QE_GPIO
extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
extern void qe_pin_free(struct qe_pin *qe_pin);
extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
extern void qe_pin_set_dedicated(struct qe_pin *pin);
#else
static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
{
return ERR_PTR(-ENOSYS);
}
static inline void qe_pin_free(struct qe_pin *qe_pin) {}
static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
#endif /* CONFIG_QE_GPIO */
/* QE internal API */
int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
......
......@@ -17,6 +17,9 @@
#include <linux/irq.h>
struct device_node;
struct qe_ic;
#define NUM_OF_QE_IC_GROUPS 6
/* Flags when we init the QE IC */
......@@ -54,17 +57,27 @@ enum qe_ic_grp_id {
QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
};
#ifdef CONFIG_QUICC_ENGINE
void qe_ic_init(struct device_node *node, unsigned int flags,
void (*low_handler)(unsigned int irq, struct irq_desc *desc),
void (*high_handler)(unsigned int irq, struct irq_desc *desc));
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
#else
static inline void qe_ic_init(struct device_node *node, unsigned int flags,
void (*low_handler)(unsigned int irq, struct irq_desc *desc),
void (*high_handler)(unsigned int irq, struct irq_desc *desc))
{}
static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{ return 0; }
static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{ return 0; }
#endif /* CONFIG_QUICC_ENGINE */
void qe_ic_set_highest_priority(unsigned int virq, int high);
int qe_ic_set_priority(unsigned int virq, unsigned int priority);
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
struct qe_ic;
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
struct irq_desc *desc)
{
......
......@@ -128,7 +128,7 @@ struct spu {
int number;
unsigned int irqs[3];
u32 node;
u64 flags;
unsigned long flags;
u64 class_0_pending;
u64 class_0_dar;
u64 class_1_dar;
......
......@@ -29,7 +29,7 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
init_task.o process.o systbl.o idle.o \
signal.o sysfs.o
signal.o sysfs.o cacheinfo.o
obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
......
/*
* Processor cache information made available to userspace via sysfs;
* intended to be compatible with x86 intel_cacheinfo implementation.
*
* Copyright 2008 IBM Corporation
* Author: Nathan Lynch
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <asm/prom.h>
#include "cacheinfo.h"
/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj;		/* bare (not embedded) kobject for cache
					 * directory */
	struct cache_index_dir *index;	/* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;		/* embedded; release frees this struct */
	struct cache_index_dir *next;	/* next index in parent directory */
	struct cache *cache;		/* cache object this index describes */
};
/* Template for determining which OF properties to query for a given
* cache type */
struct cache_type_info {
const char *name;
const char *size_prop;
/* Allow for both [di]-cache-line-size and
* [di]-cache-block-size properties. According to the PowerPC
* Processor binding, -line-size should be provided if it
* differs from the cache block size (that which is operated
* on by cache instructions), so we look for -line-size first.
* See cache_get_line_size(). */
const char *line_size_props[2];
const char *nr_sets_prop;
};
/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED 0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA 2
static const struct cache_type_info cache_type_info[] = {
{
/* PowerPC Processor binding says the [di]-cache-*
* must be equal on unified caches, so just use
* d-cache properties. */
.name = "Unified",
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size",
"d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
{
.name = "Instruction",
.size_prop = "i-cache-size",
.line_size_props = { "i-cache-line-size",
"i-cache-block-size", },
.nr_sets_prop = "i-cache-sets",
},
{
.name = "Data",
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size",
"d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
};
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

/* Per-cpu pointer to that cpu's sysfs cache directory (see cache_dir). */
static DEFINE_PER_CPU(struct cache_dir *, cache_dir);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);
/* Map an embedded kobject back to its containing index directory. */
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

/* Human-readable name for a cache's type, from the cache_type_info table. */
static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}
/* Initialize a cache object and add it to the global cache_list.
 * Takes its own reference on @ofnode (dropped in release_cache()). */
static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

/* Allocate and initialize a new cache object; returns NULL on
 * allocation failure. */
static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}
/* Warn if any cache still on the global list points at the cache being
 * released -- that would leave a dangling next_local pointer. */
static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

/* Unlink a cache object from the global list, drop its OF node
 * reference and free it.  A NULL @cache is a no-op. */
static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}
/* Mark @cpu as a user of @cache and of every cache reachable through
 * the next_local chain.  Warns if the cpu is already recorded in a
 * cache's shared_cpu_map. */
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}
/* Read the cache's size in bytes from the device tree into *ret.
 * Returns 0 on success, -ENODEV if the size property is absent. */
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = *cache_size;
	return 0;
}
/* Cache size in kilobytes; -ENODEV when the size property is missing. */
static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int bytes;

	if (cache_size(cache, &bytes) != 0)
		return -ENODEV;

	*ret = bytes >> 10;	/* bytes -> KB */
	return 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const u32 *line_size;
	int i, lim;

	/* Try -line-size first, then -block-size; see the comment on
	 * struct cache_type_info's line_size_props. */
	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;	/* neither property present */

	*ret = *line_size;
	return 0;
}
/* Read the number of sets from the device tree into *ret.
 * Returns 0 on success, -ENODEV if the property is absent. */
static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = *nr_sets;
	return 0;
}
/* Derive the associativity from size, set count and line size:
 * (size / nr_sets) / line_size.  *ret is 0 for a fully associative
 * cache (nr_sets == 1).  Returns -ENODEV when a needed property is
 * missing or any value is zero. */
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int lsize;
	unsigned int nsets;
	unsigned int bytes;

	if (cache_nr_sets(cache, &nsets))
		return -ENODEV;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nsets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &lsize))
		return -ENODEV;
	if (cache_size(cache, &bytes))
		return -ENODEV;

	if (nsets == 0 || bytes == 0 || lsize == 0)
		return -ENODEV;

	*ret = (bytes / nsets) / lsize;
	return 0;
}
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	/* For a split cache, return the sibling whose next_local links
	 * to this one (see cache_do_one_devnode_split), if any. */
	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}
/* A cache node is unified iff it carries the "cache-unified" property. */
static bool cache_node_is_unified(const struct device_node *np)
{
	const void *prop = of_get_property(np, "cache-unified", NULL);

	return prop != NULL;
}
/* Create a single cache object for a unified cache node. */
static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

/* Create separate data and instruction cache objects for a split
 * (Harvard) cache node; both refer to the same OF node.  Returns the
 * dcache with the icache linked as its next_local, or NULL (with both
 * released) if either allocation failed. */
static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}
/* Create cache object(s) for a device node, dispatching on whether it
 * describes a unified or split cache. */
static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

/* Return the cache object already registered for @node, or create one
 * at @level.  Warns when an existing object was registered at a
 * different level than the caller expected. */
static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}
/* Append @bigger to the end of @smaller's next_local chain, unless it
 * is already present somewhere in the chain. */
static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	struct cache *tail;

	for (tail = smaller; tail->next_local; tail = tail->next_local) {
		if (tail->next_local == bigger)
			return;	/* already linked */
	}

	tail->next_local = bigger;
}
/* The subsidiary-cache walk must start from an L1 cache whose OF node
 * is the cpu node itself. */
static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

/* Walk the next-level-cache chain from an L1 cache, creating (or
 * looking up) one cache object per level and linking them in level
 * order via next_local. */
static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);	/* cache_init took its own ref */
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
/* Build (or look up) the full cache hierarchy for @cpu_id and record
 * the cpu in each level's shared_cpu_map.  Returns the L1 cache
 * object, or NULL if the cpu's OF node or an allocation was
 * unavailable. */
static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);	/* cpu_node may be NULL here (early exit) */

	return cpu_cache;
}
/* Create the per-cpu "cache" sysfs directory under the cpu's sysdev
 * and record it in the per-cpu cache_dir pointer.  Returns the new
 * cache_dir, or NULL on any failure (the kobject, if created, is
 * dropped on the error path). */
static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct sys_device *sysdev;
	struct kobject *kobj = NULL;

	sysdev = get_cpu_sysdev(cpu_id);
	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
	if (!sysdev)
		goto err;

	kobj = kobject_create_and_add("cache", &sysdev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	/* only one cache_dir is expected per cpu */
	WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);

	per_cpu(cache_dir, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}
/*
 * kobject release callback for an "indexN" directory: frees the
 * cache_index_dir once the last reference to its embedded kobject is
 * dropped.
 */
static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}
/* sysfs ->show dispatcher: recover the kobj_attribute wrapping @attr
 * and forward the call to its show method. */
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr =
		container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}
static struct cache *index_kobj_to_cache(struct kobject *k)
{
struct cache_index_dir *index;
index = kobj_to_cache_index_dir(k);
return index->cache;
}
/* sysfs "size" attribute: total cache size in kilobytes, e.g. "32K".
 * Returns -ENODEV when cache_size_kb() cannot supply a value. */
static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);
/* sysfs "coherency_line_size" attribute: cache line size in bytes.
 * Returns -ENODEV when cache_get_line_size() cannot supply a value. */
static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int lsize;

	if (cache_get_line_size(index_kobj_to_cache(k), &lsize))
		return -ENODEV;

	return sprintf(buf, "%u\n", lsize);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);
/* sysfs "number_of_sets" attribute.  Returns -ENODEV when
 * cache_nr_sets() cannot supply a value. */
static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int sets;

	if (cache_nr_sets(index_kobj_to_cache(k), &sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);
/* sysfs "ways_of_associativity" attribute.  Returns -ENODEV when
 * cache_associativity() cannot supply a value. */
static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);
/* sysfs "type" attribute: human-readable cache type string. */
static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n",
		       cache_type_string(index_kobj_to_cache(k)));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);
static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_index_dir *index;
struct cache *cache;
index = kobj_to_cache_index_dir(k);
cache = index->cache;
return sprintf(buf, "%d\n", cache->level);
}
static struct kobj_attribute cache_level_attr =
__ATTR(level, 0444, level_show, NULL);
/*
 * sysfs "shared_cpu_map" attribute: bitmap of CPUs sharing this cache.
 */
static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	/* Reserve two bytes of the page: one for the trailing newline
	 * and one for the NUL terminator appended below. */
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

/* All attributes are read-only, so no .store is provided. */
static struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
/*
 * Create the optional attributes (size, line size, sets, associativity)
 * for an index directory.  Each candidate attribute's ->show method is
 * probed into a throwaway page-sized buffer first; attributes that
 * cannot produce a value are simply not registered.
 */
static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	/* Scratch buffer for probing the show methods; a failed
	 * allocation just means no optional attributes are created. */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}
/*
 * Create the "indexN" sysfs directory for @cache under @cache_dir, link
 * it onto cache_dir's index list, and populate its optional attributes.
 * Failures are silent: the directory simply does not appear.
 */
static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* Once kobject_init_and_add() has run, the kobject owns
		 * index_dir: drop the reference instead of kfree()ing.
		 * This releases the kobject name allocated by
		 * kobject_init_and_add() (a bare kfree() would leak it)
		 * and invokes cache_index_release(), which frees
		 * index_dir itself.
		 */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}
/*
 * Expose a CPU's cache chain through sysfs: create the toplevel "cache"
 * directory, then one "indexN" directory per cache on the list.
 */
static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *iter;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	for (iter = cache_list; iter; iter = iter->next_local, index++)
		cacheinfo_create_index_dir(iter, index, cache_dir);
}
/*
 * CPU-online hook: build (or look up) the cache object chain for
 * @cpu_id and, if that succeeds, expose it through sysfs.  A failed
 * chain instantiation leaves no sysfs footprint.
 */
void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
/*
 * Find the previously-instantiated L1 cache object for @cpu_id via its
 * OF node.  Returns NULL if the CPU's node cannot be found (a lookup
 * miss in cache_lookup_by_node() would also yield NULL).
 */
static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}
/*
 * Drop the kobject reference on every "indexN" directory of @cache_dir.
 * The next pointer is captured before the put, since the release
 * callback may free the entry.
 */
static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index, *next;

	for (index = cache_dir->index; index; index = next) {
		next = index->next;
		kobject_put(&index->kobj);
	}
}
/*
 * Tear down a CPU's "cache" sysfs hierarchy: remove the index
 * subdirectories first, then drop the toplevel kobject, then free the
 * bookkeeping struct itself (which is not kobject-managed).
 */
static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}
/*
 * Remove @cpu from the shared_cpu_map of every cache on its chain,
 * releasing any cache object that ends up with no users.  The next
 * pointer is captured up front because release_cache() may free the
 * current object.
 */
static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}
/*
 * CPU-offline hook: undo cacheinfo_cpu_online() for @cpu_id.  Removes
 * the sysfs hierarchy before touching the cache objects, and tolerates
 * a CPU for which sysfs population never happened.
 */
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */
#ifndef _PPC_CACHEINFO_H
#define _PPC_CACHEINFO_H

/* These are just hooks for sysfs.c to use - called from the CPU
 * register/unregister online paths. */
extern void cacheinfo_cpu_online(unsigned int cpu_id);
extern void cacheinfo_cpu_offline(unsigned int cpu_id);

#endif /* _PPC_CACHEINFO_H */
......@@ -16,7 +16,7 @@
* 2 of the License, or (at your option) any later version.
*/
#undef DEBUG
#define DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
......@@ -1356,6 +1356,63 @@ static void __init pcibios_allocate_resources(int pass)
}
}
/*
 * Reserve the legacy ISA IO range (ports 0x0000-0x0fff) and the legacy
 * VGA memory window (0xa0000-0xbffff) on a PCI root bus, so that later
 * resource assignment does not place devices on top of them.  Both
 * reservations are best-effort: a failed request_resource() is logged
 * and the candidate freed.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	/* First 4K of IO space, clamped to 32 bits */
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	/* Look for a host bridge memory window that covers the whole
	 * VGA range (0xa0000-0xbffff bus addresses). */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	/* No window covers the VGA range: nothing to reserve */
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
void __init pcibios_resource_survey(void)
{
struct pci_bus *b;
......@@ -1371,6 +1428,18 @@ void __init pcibios_resource_survey(void)
pcibios_allocate_resources(1);
}
/* Before we start assigning unassigned resource, we try to reserve
* the low IO area and the VGA memory area if they intersect the
* bus available resources to avoid allocating things on top of them
*/
if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
list_for_each_entry(b, &pci_root_buses, node)
pcibios_reserve_legacy_regions(b);
}
/* Now, if the platform didn't decide to blindly trust the firmware,
* we proceed to assigning things that were left unassigned
*/
if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
pr_debug("PCI: Assigning unassigned resouces...\n");
pci_assign_unassigned_resources();
......
......@@ -560,9 +560,14 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
* G5 machines... So when something asks for bus 0 io base
* (bus 0 is HT root), we return the AGP one instead.
*/
if (machine_is_compatible("MacRISC4"))
if (in_bus == 0)
if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
struct device_node *agp;
agp = of_find_compatible_node(NULL, NULL, "u3-agp");
if (agp)
in_bus = 0xf0;
of_node_put(agp);
}
/* That syscall isn't quite compatible with PCI domains, but it's
* used on pre-domains setup. We return the first match
......
......@@ -165,6 +165,7 @@ EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
EXPORT_SYMBOL(cacheable_memcpy);
EXPORT_SYMBOL(cacheable_memzero);
#endif
#ifdef CONFIG_PPC32
......
......@@ -824,11 +824,11 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
#endif
#ifdef CONFIG_KEXEC
lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;
lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
if (lprop)
crashk_res.end = crashk_res.start + *lprop - 1;
#endif
......@@ -893,12 +893,12 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
u64 base, size, lmb_size;
unsigned int is_kexec_kdump = 0, rngs;
ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
return 0;
lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
if (dm == NULL || l < sizeof(cell_t))
return 0;
......@@ -907,7 +907,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
return 0;
/* check if this is a kexec/kdump kernel. */
usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
&l);
if (usm != NULL)
is_kexec_kdump = 1;
......@@ -981,9 +981,9 @@ static int __init early_init_dt_scan_memory(unsigned long node,
} else if (strcmp(type, "memory") != 0)
return 0;
reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
if (reg == NULL)
reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
reg = of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL)
return 0;
......
......@@ -1210,7 +1210,7 @@ static void __init prom_initialize_tce_table(void)
/* Initialize the table to have a one-to-one mapping
* over the allocated size.
*/
tce_entryp = (unsigned long *)base;
tce_entryp = (u64 *)base;
for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
tce_entry = (i << PAGE_SHIFT);
tce_entry |= 0x3;
......
......@@ -18,6 +18,8 @@
#include <asm/machdep.h>
#include <asm/smp.h>
#include "cacheinfo.h"
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
......@@ -25,8 +27,6 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
/*
* SMT snooze delay stuff, 64-bit only for now
*/
......@@ -343,283 +343,6 @@ static struct sysdev_attribute pa6t_attrs[] = {
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
struct cache_desc {
struct kobject kobj;
struct cache_desc *next;
const char *type; /* Instruction, Data, or Unified */
u32 size; /* total cache size in KB */
u32 line_size; /* in bytes */
u32 nr_sets; /* number of sets */
u32 level; /* e.g. 1, 2, 3... */
u32 associativity; /* e.g. 8-way... 0 is fully associative */
};
DEFINE_PER_CPU(struct cache_desc *, cache_desc);
static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
{
return container_of(k, struct cache_desc, kobj);
}
static void cache_desc_release(struct kobject *k)
{
struct cache_desc *desc = kobj_to_cache_desc(k);
pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
if (desc->next)
kobject_put(&desc->next->kobj);
kfree(kobj_to_cache_desc(k));
}
static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
{
struct kobj_attribute *kobj_attr;
kobj_attr = container_of(attr, struct kobj_attribute, attr);
return kobj_attr->show(k, kobj_attr, buf);
}
static struct sysfs_ops cache_desc_sysfs_ops = {
.show = cache_desc_show,
};
static struct kobj_type cache_desc_type = {
.release = cache_desc_release,
.sysfs_ops = &cache_desc_sysfs_ops,
};
static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%uK\n", cache->size);
}
static struct kobj_attribute cache_size_attr =
__ATTR(size, 0444, cache_size_show, NULL);
static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->line_size);
}
static struct kobj_attribute cache_line_size_attr =
__ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->nr_sets);
}
static struct kobj_attribute cache_nr_sets_attr =
__ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%s\n", cache->type);
}
static struct kobj_attribute cache_type_attr =
__ATTR(type, 0444, cache_type_show, NULL);
static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->level);
}
static struct kobj_attribute cache_level_attr =
__ATTR(level, 0444, cache_level_show, NULL);
static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->associativity);
}
static struct kobj_attribute cache_assoc_attr =
__ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
struct cache_desc_info {
const char *type;
const char *size_prop;
const char *line_size_prop;
const char *nr_sets_prop;
};
/* PowerPC Processor binding says the [di]-cache-* must be equal on
* unified caches, so just use d-cache properties. */
static struct cache_desc_info ucache_info = {
.type = "Unified",
.size_prop = "d-cache-size",
.line_size_prop = "d-cache-line-size",
.nr_sets_prop = "d-cache-sets",
};
static struct cache_desc_info dcache_info = {
.type = "Data",
.size_prop = "d-cache-size",
.line_size_prop = "d-cache-line-size",
.nr_sets_prop = "d-cache-sets",
};
static struct cache_desc_info icache_info = {
.type = "Instruction",
.size_prop = "i-cache-size",
.line_size_prop = "i-cache-line-size",
.nr_sets_prop = "i-cache-sets",
};
static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
{
const u32 *cache_line_size;
struct cache_desc *new;
const u32 *cache_size;
const u32 *nr_sets;
int rc;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
"index%d", index);
if (rc)
goto err;
/* type */
new->type = info->type;
rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
WARN_ON(rc);
/* level */
new->level = level;
rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
WARN_ON(rc);
/* size */
cache_size = of_get_property(np, info->size_prop, NULL);
if (cache_size) {
new->size = *cache_size / 1024;
rc = sysfs_create_file(&new->kobj,
&cache_size_attr.attr);
WARN_ON(rc);
}
/* coherency_line_size */
cache_line_size = of_get_property(np, info->line_size_prop, NULL);
if (cache_line_size) {
new->line_size = *cache_line_size;
rc = sysfs_create_file(&new->kobj,
&cache_line_size_attr.attr);
WARN_ON(rc);
}
/* number_of_sets */
nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
if (nr_sets) {
new->nr_sets = *nr_sets;
rc = sysfs_create_file(&new->kobj,
&cache_nr_sets_attr.attr);
WARN_ON(rc);
}
/* ways_of_associativity */
if (new->nr_sets == 1) {
/* fully associative */
new->associativity = 0;
goto create_assoc;
}
if (new->nr_sets && new->size && new->line_size) {
/* If we have values for all of these we can derive
* the associativity. */
new->associativity =
((new->size * 1024) / new->nr_sets) / new->line_size;
create_assoc:
rc = sysfs_create_file(&new->kobj,
&cache_assoc_attr.attr);
WARN_ON(rc);
}
return new;
err:
kfree(new);
return NULL;
}
static bool cache_is_unified(struct device_node *np)
{
return of_get_property(np, "cache-unified", NULL);
}
static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
{
struct device_node *next_cache;
struct cache_desc *new, **end;
pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
if (cache_is_unified(np)) {
new = create_cache_desc(np, parent, index, level,
&ucache_info);
} else {
new = create_cache_desc(np, parent, index, level,
&dcache_info);
if (new) {
index++;
new->next = create_cache_desc(np, parent, index, level,
&icache_info);
}
}
if (!new)
return NULL;
end = &new->next;
while (*end)
end = &(*end)->next;
next_cache = of_find_next_cache_node(np);
if (!next_cache)
goto out;
*end = create_cache_index_info(next_cache, parent, ++index, ++level);
of_node_put(next_cache);
out:
return new;
}
static void __cpuinit create_cache_info(struct sys_device *sysdev)
{
struct kobject *cache_toplevel;
struct device_node *np = NULL;
int cpu = sysdev->id;
cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
if (!cache_toplevel)
return;
per_cpu(cache_toplevel, cpu) = cache_toplevel;
np = of_get_cpu_node(cpu, NULL);
if (np != NULL) {
per_cpu(cache_desc, cpu) =
create_cache_index_info(np, cache_toplevel, 0, 1);
of_node_put(np);
}
return;
}
static void __cpuinit register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
......@@ -684,25 +407,10 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
sysdev_create_file(s, &attr_dscr);
#endif /* CONFIG_PPC64 */
create_cache_info(s);
cacheinfo_cpu_online(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void remove_cache_info(struct sys_device *sysdev)
{
struct kobject *cache_toplevel;
struct cache_desc *cache_desc;
int cpu = sysdev->id;
cache_desc = per_cpu(cache_desc, cpu);
if (cache_desc != NULL)
kobject_put(&cache_desc->kobj);
cache_toplevel = per_cpu(cache_toplevel, cpu);
if (cache_toplevel != NULL)
kobject_put(cache_toplevel);
}
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
......@@ -769,7 +477,7 @@ static void unregister_cpu_online(unsigned int cpu)
sysdev_remove_file(s, &attr_dscr);
#endif /* CONFIG_PPC64 */
remove_cache_info(s);
cacheinfo_cpu_offline(cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */
......
......@@ -30,11 +30,11 @@
#if defined(CONFIG_40x) || defined(CONFIG_8xx)
static inline void _tlbil_all(void)
{
asm volatile ("sync; tlbia; isync" : : : "memory")
asm volatile ("sync; tlbia; isync" : : : "memory");
}
static inline void _tlbil_pid(unsigned int pid)
{
asm volatile ("sync; tlbia; isync" : : : "memory")
asm volatile ("sync; tlbia; isync" : : : "memory");
}
#else /* CONFIG_40x || CONFIG_8xx */
extern void _tlbil_all(void);
......@@ -47,7 +47,7 @@ extern void _tlbil_pid(unsigned int pid);
#ifdef CONFIG_8xx
static inline void _tlbil_va(unsigned long address, unsigned int pid)
{
asm volatile ("tlbie %0; sync" : : "r" (address) : "memory")
asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
}
#else /* CONFIG_8xx */
extern void _tlbil_va(unsigned long address, unsigned int pid);
......
......@@ -822,42 +822,50 @@ static void __init dump_numa_memory_topology(void)
* required. nid is the preferred node and end is the physical address of
* the highest address in the node.
*
* Returns the physical address of the memory.
* Returns the virtual address of the memory.
*/
static void __init *careful_allocation(int nid, unsigned long size,
static void __init *careful_zallocation(int nid, unsigned long size,
unsigned long align,
unsigned long end_pfn)
{
void *ret;
int new_nid;
unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
unsigned long ret_paddr;
ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
/* retry over all memory */
if (!ret)
ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
if (!ret_paddr)
ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
if (!ret)
panic("numa.c: cannot allocate %lu bytes on node %d",
if (!ret_paddr)
panic("numa.c: cannot allocate %lu bytes for node %d",
size, nid);
ret = __va(ret_paddr);
/*
* If the memory came from a previously allocated node, we must
* retry with the bootmem allocator.
* We initialize the nodes in numeric order: 0, 1, 2...
* and hand over control from the LMB allocator to the
* bootmem allocator. If this function is called for
* node 5, then we know that all nodes <5 are using the
* bootmem allocator instead of the LMB allocator.
*
* So, check the nid from which this allocation came
* and double check to see if we need to use bootmem
* instead of the LMB. We don't free the LMB memory
* since it would be useless.
*/
new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
if (new_nid < nid) {
ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
ret = __alloc_bootmem_node(NODE_DATA(new_nid),
size, align, 0);
if (!ret)
panic("numa.c: cannot allocate %lu bytes on node %d",
size, new_nid);
ret = __pa(ret);
dbg("alloc_bootmem %lx %lx\n", ret, size);
dbg("alloc_bootmem %p %lx\n", ret, size);
}
return (void *)ret;
memset(ret, 0, size);
return ret;
}
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
......@@ -952,7 +960,7 @@ void __init do_init_bootmem(void)
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
unsigned long bootmem_paddr;
void *bootmem_vaddr;
unsigned long bootmap_pages;
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
......@@ -964,11 +972,9 @@ void __init do_init_bootmem(void)
* previous nodes' bootmem to be initialized and have
* all reserved areas marked.
*/
NODE_DATA(nid) = careful_allocation(nid,
NODE_DATA(nid) = careful_zallocation(nid,
sizeof(struct pglist_data),
SMP_CACHE_BYTES, end_pfn);
NODE_DATA(nid) = __va(NODE_DATA(nid));
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
dbg("node %d\n", nid);
dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
......@@ -984,20 +990,20 @@ void __init do_init_bootmem(void)
dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bootmem_paddr = (unsigned long)careful_allocation(nid,
bootmem_vaddr = careful_zallocation(nid,
bootmap_pages << PAGE_SHIFT,
PAGE_SIZE, end_pfn);
memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
dbg("bootmap_paddr = %lx\n", bootmem_paddr);
dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
init_bootmem_node(NODE_DATA(nid),
__pa(bootmem_vaddr) >> PAGE_SHIFT,
start_pfn, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
/*
* Be very careful about moving this around. Future
* calls to careful_allocation() depend on this getting
* calls to careful_zallocation() depend on this getting
* done correctly.
*/
mark_reserved_regions_for_nid(nid);
......
......@@ -266,7 +266,8 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
/* The PTE should never be already set nor present in the
* hash table
*/
BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE));
BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
flags);
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
}
......
......@@ -189,8 +189,9 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
_tlbil_pid(0);
preempt_enable();
#endif
#else
_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
......
......@@ -79,7 +79,7 @@ struct spu_buffer {
* the vma-to-fileoffset map.
*/
struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
u64 objectid);
unsigned long objectid);
unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
unsigned int vma, const struct spu *aSpu,
int *grd_val);
......
......@@ -42,7 +42,7 @@ static struct of_device_id mpc52xx_bus_ids[] __initdata = {
* from interrupt context while node mapping (which calls ioremap())
* cannot be used at such point.
*/
static spinlock_t mpc52xx_lock = SPIN_LOCK_UNLOCKED;
static DEFINE_SPINLOCK(mpc52xx_lock);
static struct mpc52xx_gpt __iomem *mpc52xx_wdt;
static struct mpc52xx_cdm __iomem *mpc52xx_cdm;
......
......@@ -42,7 +42,7 @@ static void __init mpc831x_rdb_setup_arch(void)
mpc831x_usb_cfg();
}
void __init mpc831x_rdb_init_IRQ(void)
static void __init mpc831x_rdb_init_IRQ(void)
{
struct device_node *np;
......
......@@ -49,8 +49,6 @@
#define DBG(fmt...)
#endif
static u8 *bcsr_regs = NULL;
/* ************************************************************************
*
* Setup the architecture
......@@ -59,13 +57,14 @@ static u8 *bcsr_regs = NULL;
static void __init mpc832x_sys_setup_arch(void)
{
struct device_node *np;
u8 __iomem *bcsr_regs = NULL;
if (ppc_md.progress)
ppc_md.progress("mpc832x_sys_setup_arch()", 0);
/* Map BCSR area */
np = of_find_node_by_name(NULL, "bcsr");
if (np != 0) {
if (np) {
struct resource res;
of_address_to_resource(np, 0, &res);
......@@ -93,9 +92,9 @@ static void __init mpc832x_sys_setup_arch(void)
!= NULL){
/* Reset the Ethernet PHYs */
#define BCSR8_FETH_RST 0x50
bcsr_regs[8] &= ~BCSR8_FETH_RST;
clrbits8(&bcsr_regs[8], BCSR8_FETH_RST);
udelay(1000);
bcsr_regs[8] |= BCSR8_FETH_RST;
setbits8(&bcsr_regs[8], BCSR8_FETH_RST);
iounmap(bcsr_regs);
of_node_put(np);
}
......
......@@ -38,6 +38,7 @@
#define DBG(fmt...)
#endif
#ifdef CONFIG_QUICC_ENGINE
static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity)
{
pr_debug("%s %d %d\n", __func__, cs, polarity);
......@@ -77,8 +78,8 @@ static int __init mpc832x_spi_init(void)
mpc83xx_spi_activate_cs,
mpc83xx_spi_deactivate_cs);
}
machine_device_initcall(mpc832x_rdb, mpc832x_spi_init);
#endif /* CONFIG_QUICC_ENGINE */
/* ************************************************************************
*
......@@ -130,7 +131,7 @@ static int __init mpc832x_declare_of_platform_devices(void)
}
machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices);
void __init mpc832x_rdb_init_IRQ(void)
static void __init mpc832x_rdb_init_IRQ(void)
{
struct device_node *np;
......
......@@ -18,6 +18,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
......@@ -43,6 +44,7 @@
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/simple_gpio.h>
#include <asm/qe.h>
#include <asm/qe_ic.h>
......@@ -55,8 +57,6 @@
#define DBG(fmt...)
#endif
static u8 *bcsr_regs = NULL;
/* ************************************************************************
*
* Setup the architecture
......@@ -65,13 +65,14 @@ static u8 *bcsr_regs = NULL;
static void __init mpc836x_mds_setup_arch(void)
{
struct device_node *np;
u8 __iomem *bcsr_regs = NULL;
if (ppc_md.progress)
ppc_md.progress("mpc836x_mds_setup_arch()", 0);
/* Map BCSR area */
np = of_find_node_by_name(NULL, "bcsr");
if (np != 0) {
if (np) {
struct resource res;
of_address_to_resource(np, 0, &res);
......@@ -93,6 +94,16 @@ static void __init mpc836x_mds_setup_arch(void)
for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
par_io_of_config(np);
#ifdef CONFIG_QE_USB
/* Must fixup Par IO before QE GPIO chips are registered. */
par_io_config_pin(1, 2, 1, 0, 3, 0); /* USBOE */
par_io_config_pin(1, 3, 1, 0, 3, 0); /* USBTP */
par_io_config_pin(1, 8, 1, 0, 1, 0); /* USBTN */
par_io_config_pin(1, 10, 2, 0, 3, 0); /* USBRXD */
par_io_config_pin(1, 9, 2, 1, 3, 0); /* USBRP */
par_io_config_pin(1, 11, 2, 1, 3, 0); /* USBRN */
par_io_config_pin(2, 20, 2, 0, 1, 0); /* CLK21 */
#endif /* CONFIG_QE_USB */
}
if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
......@@ -151,6 +162,70 @@ static int __init mpc836x_declare_of_platform_devices(void)
}
machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices);
#ifdef CONFIG_QE_USB
/*
 * Configure the MPC8360E-MDS board control/status registers (BCSR) for
 * the QE USB controller: switch the Ethernet PHY interfaces to RGMII,
 * enable the full-speed USB transceiver, and select host vs. peripheral
 * operation based on the controller node's "mode" property.
 *
 * Returns 0 on success, -ENODEV if either required node is missing, or
 * -ENOMEM if the BCSR cannot be mapped.
 */
static int __init mpc836x_usb_cfg(void)
{
	u8 __iomem *bcsr;
	struct device_node *np;
	const char *mode;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, "fsl,mpc8360mds-bcsr");
	if (!np)
		return -ENODEV;

	bcsr = of_iomap(np, 0);
	of_node_put(np);
	if (!bcsr)
		return -ENOMEM;

	np = of_find_compatible_node(NULL, NULL, "fsl,mpc8323-qe-usb");
	if (!np) {
		ret = -ENODEV;
		goto err;
	}

#define BCSR8_TSEC1M_MASK	(0x3 << 6)
#define BCSR8_TSEC1M_RGMII	(0x0 << 6)
#define BCSR8_TSEC2M_MASK	(0x3 << 4)
#define BCSR8_TSEC2M_RGMII	(0x0 << 4)
	/*
	 * Default is GMII (2), but we should set it to RGMII (0) if we use
	 * USB (Eth PHY is in RGMII mode anyway).
	 */
	clrsetbits_8(&bcsr[8], BCSR8_TSEC1M_MASK | BCSR8_TSEC2M_MASK,
			       BCSR8_TSEC1M_RGMII | BCSR8_TSEC2M_RGMII);

#define BCSR13_USBMASK	0x0f
#define BCSR13_nUSBEN	0x08 /* 1 - Disable, 0 - Enable      */
#define BCSR13_USBSPEED	0x04 /* 1 - Full, 0 - Low             */
#define BCSR13_USBMODE	0x02 /* 1 - Host, 0 - Function        */
#define BCSR13_nUSBVCC	0x01 /* 1 - gets VBUS, 0 - supplies VBUS */

	/* Clear the mask (enables USB: nUSBEN=0) and set full speed */
	clrsetbits_8(&bcsr[13], BCSR13_USBMASK, BCSR13_USBSPEED);

	mode = of_get_property(np, "mode", NULL);
	if (mode && !strcmp(mode, "peripheral")) {
		setbits8(&bcsr[13], BCSR13_nUSBVCC);
		qe_usb_clock_set(QE_CLK21, 48000000);
	} else {
		setbits8(&bcsr[13], BCSR13_USBMODE);
		/*
		 * The BCSR GPIOs are used to control power and
		 * speed of the USB transceiver. This is needed for
		 * the USB Host only.
		 */
		simple_gpiochip_init("fsl,mpc8360mds-bcsr-gpio");
	}

	of_node_put(np);
err:
	iounmap(bcsr);
	return ret;
}
machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
#endif /* CONFIG_QE_USB */
static void __init mpc836x_mds_init_IRQ(void)
{
struct device_node *np;
......
......@@ -51,8 +51,9 @@ static void __init mpc836x_rdk_setup_arch(void)
for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
mpc83xx_add_bridge(np);
#endif
#ifdef CONFIG_QUICC_ENGINE
qe_reset();
#endif
}
static void __init mpc836x_rdk_init_IRQ(void)
......@@ -71,13 +72,14 @@ static void __init mpc836x_rdk_init_IRQ(void)
*/
ipic_set_default_priority();
of_node_put(np);
#ifdef CONFIG_QUICC_ENGINE
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (!np)
return;
qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
of_node_put(np);
#endif
}
/*
......
......@@ -26,7 +26,6 @@
#define BCSR12_USB_SER_MASK 0x8a
#define BCSR12_USB_SER_PIN 0x80
#define BCSR12_USB_SER_DEVICE 0x02
extern int mpc837x_usb_cfg(void);
static int mpc837xmds_usb_cfg(void)
{
......
......@@ -21,8 +21,6 @@
#include "mpc83xx.h"
extern int mpc837x_usb_cfg(void);
/* ************************************************************************
*
* Setup the architecture
......
......@@ -61,6 +61,7 @@
extern void mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);
extern int mpc837x_usb_cfg(void);
extern int mpc834x_usb_cfg(void);
extern int mpc831x_usb_cfg(void);
......
......@@ -148,6 +148,9 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
/*
* Setup the architecture
*/
#ifdef CONFIG_SMP
extern void __init mpc85xx_smp_init(void);
#endif
static void __init mpc85xx_ds_setup_arch(void)
{
#ifdef CONFIG_PCI
......@@ -173,6 +176,10 @@ static void __init mpc85xx_ds_setup_arch(void)
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
#ifdef CONFIG_SMP
mpc85xx_smp_init();
#endif
printk("MPC85xx DS board from Freescale Semiconductor\n");
}
......
......@@ -58,6 +58,7 @@ smp_85xx_kick_cpu(int nr)
if (cpu_rel_addr == NULL) {
printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
local_irq_restore(flags);
return;
}
......
......@@ -312,4 +312,15 @@ config MPC8xxx_GPIO
Say Y here if you're going to use hardware that connects to the
MPC831x/834x/837x/8572/8610 GPIOs.
config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"
depends on PPC
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here to support simple, memory-mapped GPIO controllers.
These are usually BCSRs used to control the board's switches, LEDs,
chip-selects, Ethernet/USB PHY's power and various other small
on-board peripherals.
endmenu
......@@ -231,7 +231,7 @@ config VIRT_CPU_ACCOUNTING
If in doubt, say Y here.
config SMP
depends on PPC_STD_MMU
depends on PPC_STD_MMU || FSL_BOOKE
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
......
......@@ -44,8 +44,8 @@ static DEFINE_SPINLOCK(beat_htab_lock);
static inline unsigned int beat_read_mask(unsigned hpte_group)
{
unsigned long hpte_v[5];
unsigned long rmask = 0;
u64 hpte_v[5];
beat_read_htab_entries(0, hpte_group + 0, hpte_v);
if (!(hpte_v[0] & HPTE_V_BOLTED))
......@@ -93,8 +93,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
int psize, int ssize)
{
unsigned long lpar_rc;
unsigned long slot;
unsigned long hpte_v, hpte_r;
u64 hpte_v, hpte_r, slot;
/* same as iseries */
if (vflags & HPTE_V_SECONDARY)
......@@ -153,8 +152,9 @@ static long beat_lpar_hpte_remove(unsigned long hpte_group)
static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
{
unsigned long dword0, dword[5];
unsigned long dword0;
unsigned long lpar_rc;
u64 dword[5];
lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
......@@ -170,7 +170,7 @@ static void beat_lpar_hptab_clear(void)
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
int i;
unsigned long dummy0, dummy1;
u64 dummy0, dummy1;
/* TODO: Use bulk call */
for (i = 0; i < hpte_count; i++)
......@@ -189,7 +189,8 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
unsigned long dummy0, dummy1, want_v;
u64 dummy0, dummy1;
unsigned long want_v;
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
......@@ -255,7 +256,8 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea,
int psize, int ssize)
{
unsigned long lpar_rc, slot, vsid, va, dummy0, dummy1;
unsigned long lpar_rc, slot, vsid, va;
u64 dummy0, dummy1;
vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
va = (vsid << 28) | (ea & 0x0fffffff);
......@@ -276,7 +278,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
{
unsigned long want_v;
unsigned long lpar_rc;
unsigned long dummy1, dummy2;
u64 dummy1, dummy2;
unsigned long flags;
DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
......@@ -315,8 +317,7 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
int psize, int ssize)
{
unsigned long lpar_rc;
unsigned long slot;
unsigned long hpte_v, hpte_r;
u64 hpte_v, hpte_r, slot;
/* same as iseries */
if (vflags & HPTE_V_SECONDARY)
......
......@@ -40,8 +40,8 @@ static void udbg_putc_beat(char c)
}
/* Buffered chars getc */
static long inbuflen;
static long inbuf[2]; /* must be 2 longs */
static u64 inbuflen;
static u64 inbuf[2]; /* must be 2 u64s */
static int udbg_getc_poll_beat(void)
{
......
......@@ -54,7 +54,7 @@ int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
{
struct cbe_pmd_regs __iomem *pmd_regs;
struct cbe_mic_tm_regs __iomem *mic_tm_regs;
u64 flags;
unsigned long flags;
u64 value;
#ifdef DEBUG
long time;
......
......@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
iic = &__get_cpu_var(iic);
*(unsigned long *) &pending =
in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
in_be64((u64 __iomem *) &iic->regs->pending_destr);
if (!(pending.flags & CBE_IIC_IRQ_VALID))
return NO_IRQ;
virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
......
......@@ -130,14 +130,14 @@ static const struct ppc_pci_io __devinitconst iowa_pci_io = {
};
static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size,
static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
struct iowa_bus *bus;
void __iomem *res = __ioremap(addr, size, flags);
int busno;
bus = iowa_pci_find(0, addr);
bus = iowa_pci_find(0, (unsigned long)addr);
if (bus != NULL) {
busno = bus - iowa_busses;
PCI_SET_ADDR_TOKEN(res, busno + 1);
......
......@@ -150,8 +150,8 @@ static int cbe_nr_iommus;
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
long n_ptes)
{
unsigned long __iomem *reg;
unsigned long val;
u64 __iomem *reg;
u64 val;
long n;
reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
......
......@@ -10,18 +10,21 @@ menu "iSeries device drivers"
config VIODASD
tristate "iSeries Virtual I/O disk support"
depends on BLOCK
select VIOPATH
help
If you are running on an iSeries system and you want to use
virtual disks created and managed by OS/400, say Y.
config VIOCD
tristate "iSeries Virtual I/O CD support"
select VIOPATH
help
If you are running Linux on an IBM iSeries system and you want to
read a CD drive owned by OS/400, say Y here.
config VIOTAPE
tristate "iSeries Virtual Tape Support"
select VIOPATH
help
If you are running Linux on an iSeries system and you want Linux
to read and/or write a tape drive owned by OS/400, say Y here.
......@@ -30,5 +33,3 @@ endmenu
config VIOPATH
bool
depends on VIODASD || VIOCD || VIOTAPE || ISERIES_VETH
default y
......@@ -23,6 +23,7 @@
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/kexec.h>
#include <linux/major.h>
#include <linux/root_dev.h>
#include <linux/kernel.h>
......@@ -638,6 +639,13 @@ static int __init iseries_probe(void)
return 1;
}
#ifdef CONFIG_KEXEC
/* kexec is not supported on legacy iSeries; always refuse to load an image. */
static int iseries_kexec_prepare(struct kimage *image)
{
return -ENOSYS;
}
#endif
define_machine(iseries) {
.name = "iSeries",
.setup_arch = iSeries_setup_arch,
......@@ -658,6 +666,9 @@ define_machine(iseries) {
.probe = iseries_probe,
.ioremap = iseries_ioremap,
.iounmap = iseries_iounmap,
#ifdef CONFIG_KEXEC
.machine_kexec_prepare = iseries_kexec_prepare,
#endif
/* XXX Implement enable_pmcs for iSeries */
};
......
......@@ -112,7 +112,7 @@ static int get_gizmo_latency(void)
static void set_astate(int cpu, unsigned int astate)
{
u64 flags;
unsigned long flags;
/* Return if called before init has run */
if (unlikely(!sdcasr_mapbase))
......
......@@ -509,7 +509,7 @@ static void *map_onedev(struct pci_dev *p, int index)
*/
int pasemi_dma_init(void)
{
static spinlock_t init_lock = SPIN_LOCK_UNLOCKED;
static DEFINE_SPINLOCK(init_lock);
struct pci_dev *iob_pdev;
struct pci_dev *pdev;
struct resource res;
......
......@@ -661,6 +661,7 @@ static void __init init_second_ohare(void)
pci_find_hose_for_OF_device(np);
if (!hose) {
printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
of_node_put(np);
return;
}
early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
......@@ -669,6 +670,7 @@ static void __init init_second_ohare(void)
early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
}
has_second_ohare = 1;
of_node_put(np);
}
/*
......
......@@ -265,12 +265,15 @@ int __init via_calibrate_decr(void)
struct resource rsrc;
vias = of_find_node_by_name(NULL, "via-cuda");
if (vias == 0)
if (vias == NULL)
vias = of_find_node_by_name(NULL, "via-pmu");
if (vias == 0)
if (vias == NULL)
vias = of_find_node_by_name(NULL, "via");
if (vias == 0 || of_address_to_resource(vias, 0, &rsrc))
if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) {
of_node_put(vias);
return 0;
}
of_node_put(vias);
via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
if (via == NULL) {
printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
......@@ -297,7 +300,7 @@ int __init via_calibrate_decr(void)
ppc_tb_freq = (dstart - dend) * 100 / 6;
iounmap(via);
return 1;
}
#endif
......
......@@ -518,6 +518,41 @@ static int __init ps3_register_graphics_devices(void)
return result;
}
static int __init ps3_register_ramdisk_device(void)
{
int result;
struct layout {
struct ps3_system_bus_device dev;
} *p;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
p = kzalloc(sizeof(struct layout), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev.match_id = PS3_MATCH_ID_GPU;
p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK;
p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
result = ps3_system_bus_device_register(&p->dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
fail_device_register:
kfree(p);
pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
/**
* ps3_setup_dynamic_device - Setup a dynamic device from the repository
*/
......@@ -946,6 +981,8 @@ static int __init ps3_register_devices(void)
ps3_register_lpm_devices();
ps3_register_ramdisk_device();
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
}
......
......@@ -17,6 +17,7 @@ obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o
obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
obj-$(CONFIG_RAPIDIO) += fsl_rio.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
......
......@@ -29,7 +29,8 @@
#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
/* atmu setup for fsl pci/pcie controller */
void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
static void __init setup_pci_atmu(struct pci_controller *hose,
struct resource *rsrc)
{
struct ccsr_pci __iomem *pci;
int i;
......@@ -86,7 +87,7 @@ void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
out_be32(&pci->piw[2].piwar, PIWAR_2G);
}
void __init setup_pci_cmd(struct pci_controller *hose)
static void __init setup_pci_cmd(struct pci_controller *hose)
{
u16 cmd;
int cap_x;
......@@ -130,7 +131,7 @@ static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
return ;
}
int __init fsl_pcie_check_link(struct pci_controller *hose)
static int __init fsl_pcie_check_link(struct pci_controller *hose)
{
u32 val;
early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
......
......@@ -5,8 +5,13 @@
#include <asm/mmu.h>
extern phys_addr_t get_immrbase(void);
#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
extern u32 get_brgfreq(void);
extern u32 get_baudrate(void);
#else
static inline u32 get_brgfreq(void) { return -1; }
static inline u32 get_baudrate(void) { return -1; }
#endif
extern u32 fsl_get_sys_freq(void);
struct spi_board_info;
......
......@@ -22,5 +22,6 @@ config UCC
config QE_USB
bool
default y if USB_GADGET_FSL_QE
help
QE USB Host Controller support
QE USB Controller support
......@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
......@@ -24,8 +25,14 @@ struct qe_gpio_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;
unsigned long pin_flags[QE_PIO_PINS];
#define QE_PIN_REQUESTED 0
/* shadowed data register to clear/set bits safely */
u32 cpdata;
/* saved_regs used to restore dedicated functions */
struct qe_pio_regs saved_regs;
};
static inline struct qe_gpio_chip *
......@@ -40,6 +47,12 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
}
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
......@@ -103,6 +116,188 @@ static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
/*
 * Opaque pin handle returned by qe_pin_request() and consumed by the
 * rest of the QE pin multiplexing API (qe_pin_free() and friends).
 */
struct qe_pin {
/*
 * The qe_gpio_chip name is unfortunate, we should change that to
 * something like qe_pio_controller. Someday.
 */
struct qe_gpio_chip *controller;
int num;	/* pin index within the controller's bank */
};
/**
 * qe_pin_request - Request a QE pin
 * @np: device node to get a pin from
 * @index: index of a pin in the device tree
 * Context: non-atomic
 *
 * This function returns a qe_pin so that you can use it with the rest of
 * the QE Pin Multiplexing API. On failure an ERR_PTR()-encoded errno is
 * returned; release the pin with qe_pin_free().
 */
struct qe_pin *qe_pin_request(struct device_node *np, int index)
{
struct qe_pin *qe_pin;
struct device_node *gc;
struct of_gpio_chip *of_gc = NULL;
struct of_mm_gpio_chip *mm_gc;
struct qe_gpio_chip *qe_gc;
int err;
int size;
const void *gpio_spec;
const u32 *gpio_cells;
unsigned long flags;

qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
if (!qe_pin) {
pr_debug("%s: can't allocate memory\n", __func__);
return ERR_PTR(-ENOMEM);
}

/* Resolve the index'th "gpios" phandle into controller node + specifier. */
err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
&gc, &gpio_spec);
if (err) {
pr_debug("%s: can't parse gpios property\n", __func__);
goto err0;
}

/* Only QE parallel-I/O banks can be used as multiplexed QE pins. */
if (!of_device_is_compatible(gc, "fsl,mpc8323-qe-pario-bank")) {
pr_debug("%s: tried to get a non-qe pin\n", __func__);
err = -EINVAL;
goto err1;
}

of_gc = gc->data;
if (!of_gc) {
pr_debug("%s: gpio controller %s isn't registered\n",
np->full_name, gc->full_name);
err = -ENODEV;
goto err1;
}

gpio_cells = of_get_property(gc, "#gpio-cells", &size);
if (!gpio_cells || size != sizeof(*gpio_cells) ||
*gpio_cells != of_gc->gpio_cells) {
pr_debug("%s: wrong #gpio-cells for %s\n",
np->full_name, gc->full_name);
err = -EINVAL;
goto err1;
}

/* On success xlate returns the pin number within the bank. */
err = of_gc->xlate(of_gc, np, gpio_spec, NULL);
if (err < 0)
goto err1;

mm_gc = to_of_mm_gpio_chip(&of_gc->gc);
qe_gc = to_qe_gpio_chip(mm_gc);

spin_lock_irqsave(&qe_gc->lock, flags);

/* Claim the pin; -EBUSY if somebody already holds it. */
if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
qe_pin->controller = qe_gc;
qe_pin->num = err;
err = 0;
} else {
err = -EBUSY;
}

spin_unlock_irqrestore(&qe_gc->lock, flags);

/* NOTE(review): on success the reference on gc is intentionally kept. */
if (!err)
return qe_pin;

err1:
of_node_put(gc);
err0:
kfree(qe_pin);
pr_debug("%s failed with status %d\n", __func__, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL(qe_pin_request);
/**
 * qe_pin_free - Free a pin
 * @qe_pin: pointer to the qe_pin structure
 * Context: any
 *
 * Releases the pin previously obtained from qe_pin_request() — the bank's
 * "requested" bit is cleared under the controller lock — and frees the
 * handle itself, making the pin available for further qe_pin_request()
 * calls.
 */
void qe_pin_free(struct qe_pin *qe_pin)
{
	struct qe_gpio_chip *qe_gc = qe_pin->controller;
	const int pin = qe_pin->num;
	unsigned long irqflags;

	spin_lock_irqsave(&qe_gc->lock, irqflags);
	test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
	spin_unlock_irqrestore(&qe_gc->lock, irqflags);

	kfree(qe_pin);
}
EXPORT_SYMBOL(qe_pin_free);
/**
 * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
 * @qe_pin: pointer to the qe_pin structure
 * Context: any
 *
 * This function resets a pin to a dedicated peripheral function that
 * has been set up by the firmware, using the register snapshot taken
 * in qe_gpio_save_regs() when the bank was registered.
 */
void qe_pin_set_dedicated(struct qe_pin *qe_pin)
{
struct qe_gpio_chip *qe_gc = qe_pin->controller;
struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
struct qe_pio_regs *sregs = &qe_gc->saved_regs;
int pin = qe_pin->num;
/* One bit per pin in cpdata/cpodr, numbered MSB-first. */
u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
/* Two bits per pin in cpdir/cppar; each register holds half the bank. */
u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
unsigned long flags;

spin_lock_irqsave(&qe_gc->lock, flags);

/* Restore direction and pin-assignment bits from the saved snapshot. */
if (second_reg) {
clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
} else {
clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
}

/* Restore the data bit via the shadow so other pins stay untouched. */
if (sregs->cpdata & mask1)
qe_gc->cpdata |= mask1;
else
qe_gc->cpdata &= ~mask1;

out_be32(&regs->cpdata, qe_gc->cpdata);
/* Restore the open-drain bit as well. */
clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);

spin_unlock_irqrestore(&qe_gc->lock, flags);
}
EXPORT_SYMBOL(qe_pin_set_dedicated);
/**
 * qe_pin_set_gpio - Set a pin to the GPIO mode
 * @qe_pin: pointer to the qe_pin structure
 * Context: any
 *
 * Switches the pin into GPIO mode. The pin is configured as an input by
 * default; the generic GPIO API can change its direction afterwards.
 */
void qe_pin_set_gpio(struct qe_pin *qe_pin)
{
	struct qe_gpio_chip *qe_gc = qe_pin->controller;
	unsigned long irqflags;

	spin_lock_irqsave(&qe_gc->lock, irqflags);

	/* Let's make it input by default, GPIO API is able to change that. */
	__par_io_config_pin(qe_gc->mm_gc.regs, qe_pin->num, QE_PIO_DIR_IN,
			    0, 0, 0);

	spin_unlock_irqrestore(&qe_gc->lock, irqflags);
}
EXPORT_SYMBOL(qe_pin_set_gpio);
static int __init qe_add_gpiochips(void)
{
struct device_node *np;
......
/*
* Simple Memory-Mapped GPIOs
*
* Copyright (c) MontaVista Software, Inc. 2008.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <asm/prom.h>
#include "simple_gpio.h"
/* State for one 8-bit memory-mapped GPIO bank. */
struct u8_gpio_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;	/* protects the shadow register below */

/* shadowed data register to clear/set bits safely */
u8 data;
};
/* Map an of_mm_gpio_chip back to its containing u8_gpio_chip. */
static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc)
{
return container_of(mm_gc, struct u8_gpio_chip, mm_gc);
}
/* Pins are numbered MSB-first: pin 0 is bit 7 of the 8-bit register. */
static u8 u8_pin2mask(unsigned int pin)
{
	return 0x80 >> pin;
}
static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
return in_8(mm_gc->regs) & u8_pin2mask(gpio);
}
static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
unsigned long flags;
spin_lock_irqsave(&u8_gc->lock, flags);
if (val)
u8_gc->data |= u8_pin2mask(gpio);
else
u8_gc->data &= ~u8_pin2mask(gpio);
out_8(mm_gc->regs, u8_gc->data);
spin_unlock_irqrestore(&u8_gc->lock, flags);
}
/*
 * This driver has no direction register to program; the bank is always
 * readable, so switching to input is a no-op that reports success.
 */
static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
return 0;
}
/*
 * No direction register to program: just latch the requested output
 * value and report success.
 */
static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
u8_gpio_set(gc, gpio, val);
return 0;
}
/*
 * Seed the data shadow from the current register contents. Hooked up
 * as mm_gc->save_regs in u8_simple_gpiochip_add(), so it runs when the
 * chip is registered.
 */
static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);

u8_gc->data = in_8(mm_gc->regs);
}
/*
 * Allocate and register one 8-bit GPIO bank for the given device node.
 * Returns 0 on success or a negative errno; the allocation is released
 * on failure.
 */
static int __init u8_simple_gpiochip_add(struct device_node *np)
{
	struct u8_gpio_chip *u8_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct of_gpio_chip *of_gc;
	struct gpio_chip *gc;
	int ret;

	u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL);
	if (!u8_gc)
		return -ENOMEM;

	spin_lock_init(&u8_gc->lock);

	mm_gc = &u8_gc->mm_gc;
	of_gc = &mm_gc->of_gc;
	gc = &of_gc->gc;

	/* Wire up the callbacks and describe the 8-pin bank. */
	mm_gc->save_regs = u8_gpio_save_regs;
	of_gc->gpio_cells = 2;
	gc->ngpio = 8;
	gc->direction_input = u8_gpio_dir_in;
	gc->direction_output = u8_gpio_dir_out;
	gc->get = u8_gpio_get;
	gc->set = u8_gpio_set;

	ret = of_mm_gpiochip_add(np, mm_gc);
	if (ret)
		kfree(u8_gc);
	return ret;
}
/*
 * Register every GPIO bank in the device tree matching @compatible.
 * Only 1-byte-wide banks are supported so far; failures are logged and
 * the scan continues with the next node.
 */
void __init simple_gpiochip_init(const char *compatible)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, compatible) {
		struct resource r;
		int ret;

		ret = of_address_to_resource(np, 0, &r);
		if (ret)
			goto err;

		if (resource_size(&r) == 1) {
			ret = u8_simple_gpiochip_add(np);
			if (ret)
				goto err;
		} else {
			/*
			 * Whenever you need support for GPIO bank width > 1,
			 * please just turn u8_ code into huge macros, and
			 * construct needed uX_ code with it.
			 */
			ret = -ENOSYS;
			goto err;
		}
		continue;
err:
		pr_err("%s: registration failed, status %d\n",
		       np->full_name, ret);
	}
}
#ifndef __SYSDEV_SIMPLE_GPIO_H
#define __SYSDEV_SIMPLE_GPIO_H

#include <linux/errno.h>

#ifdef CONFIG_SIMPLE_GPIO
/* Register all memory-mapped GPIO banks matching @compatible. */
extern void simple_gpiochip_init(const char *compatible);
#else
/* No-op stub so callers need not be conditional on CONFIG_SIMPLE_GPIO. */
static inline void simple_gpiochip_init(const char *compatible) {}
#endif /* CONFIG_SIMPLE_GPIO */

#endif /* __SYSDEV_SIMPLE_GPIO_H */
......@@ -616,6 +616,7 @@ config HVC_ISERIES
default y
select HVC_DRIVER
select HVC_IRQ
select VIOPATH
help
iSeries machines support a hypervisor virtual console.
......
......@@ -44,7 +44,7 @@ static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt)
static unsigned char q[sizeof(unsigned long) * 2]
__attribute__((aligned(sizeof(unsigned long))));
static int qlen = 0;
unsigned long got;
u64 got;
again:
if (qlen) {
......@@ -63,7 +63,7 @@ static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt)
}
}
if (beat_get_term_char(vtermno, &got,
((unsigned long *)q), ((unsigned long *)q) + 1) == 0) {
((u64 *)q), ((u64 *)q) + 1) == 0) {
qlen = got;
goto again;
}
......
......@@ -120,6 +120,13 @@ config MTD_PHRAM
doesn't have access to, memory beyond the mem=xxx limit, nvram,
memory on the video card, etc...
config MTD_PS3VRAM
tristate "PS3 video RAM"
depends on FB_PS3
help
This driver allows you to use excess PS3 video RAM as volatile
storage or system swap.
config MTD_LART
tristate "28F160xx flash driver for LART"
depends on SA1100_LART
......
......@@ -16,3 +16,4 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_PS3VRAM) += ps3vram.o
/**
* ps3vram - Use extra PS3 video ram as MTD block device.
*
* Copyright (c) 2007-2008 Jim Paris <jim@jtan.com>
* Added support for RSX DMA by Vivien Chappelier <vivien.chappelier@free.fr>
*/
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#define DEVICE_NAME "ps3vram"
#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */
#define XDR_IOIF 0x0c000000
#define FIFO_BASE XDR_IOIF
#define FIFO_SIZE (64 * 1024)
#define DMA_PAGE_SIZE (4 * 1024)
#define CACHE_PAGE_SIZE (256 * 1024)
#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
#define CACHE_OFFSET CACHE_PAGE_SIZE
#define FIFO_OFFSET 0
#define CTRL_PUT 0x10
#define CTRL_GET 0x11
#define CTRL_TOP 0x15
#define UPLOAD_SUBCH 1
#define DOWNLOAD_SUBCH 2
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
struct mtd_info ps3vram_mtd;
#define CACHE_PAGE_PRESENT 1
#define CACHE_PAGE_DIRTY 2
/* Tag describing what one cache slot currently holds. */
struct ps3vram_tag {
unsigned int address;	/* VRAM offset of the cached page */
unsigned int flags;	/* CACHE_PAGE_PRESENT and/or CACHE_PAGE_DIRTY */
};
/* Fully associative write-back cache of VRAM pages (slots in the XDR buffer). */
struct ps3vram_cache {
unsigned int page_count;	/* number of cache slots */
unsigned int page_size;	/* bytes per slot */
struct ps3vram_tag *tags;	/* one tag per slot */
};
/* Per-device driver state, reachable via mtd_info->priv. */
struct ps3vram_priv {
u64 memory_handle;	/* lv1 handle for the VRAM region — set at probe, not in this chunk */
u64 context_handle;	/* lv1 GPU context, used for FB_BLIT kicks */
u32 *ctrl;	/* FIFO control words, indexed by CTRL_PUT/GET/TOP */
u32 *reports;	/* report/notifier area, see ps3vram_get_notifier() */
u8 __iomem *ddr_base;	/* mapped video (DDR) memory */
u8 *xdr_buf;	/* XDR buffer holding the command FIFO and the page cache */
u32 *fifo_base;	/* start of the command FIFO */
u32 *fifo_ptr;	/* current FIFO write position */
struct device *dev;	/* for dev_dbg()/dev_err() logging */
struct ps3vram_cache cache;
/* Used to serialize cache/DMA operations */
struct mutex lock;
};
#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */
#define DMA_NOTIFIER_SIZE 0x40
#define NOTIFIER 7 /* notifier used for completion report */
/* A trailing '-' means to subtract off ps3fb_videomemory.size */
char *size = "256M-";
module_param(size, charp, 0);
MODULE_PARM_DESC(size, "memory size");
/* Locate notifier block #notifier inside the GPU reports area. */
static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
{
	void *base = (void *)reports + DMA_NOTIFIER_OFFSET_BASE;

	return base + notifier * DMA_NOTIFIER_SIZE;
}
/* Arm the completion notifier; ps3vram_notifier_wait() later polls word 3. */
static void ps3vram_notifier_reset(struct mtd_info *mtd)
{
	struct ps3vram_priv *priv = mtd->priv;
	u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
	int word;

	for (word = 0; word < 4; word++)
		notify[word] = 0xffffffff;
}
/*
 * Poll the completion notifier (sleeping 1 ms between reads) until its
 * word 3 reads zero or @timeout_ms elapses. Returns 0 on completion,
 * -ETIMEDOUT otherwise. Arm the notifier first with
 * ps3vram_notifier_reset().
 */
static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms)
{
struct ps3vram_priv *priv = mtd->priv;
u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

do {
if (!notify[3])
return 0;
msleep(1);
} while (time_before(jiffies, timeout));

return -ETIMEDOUT;
}
static void ps3vram_init_ring(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
}
/*
 * Wait up to @timeout_ms for the GPU to drain the command FIFO, i.e.
 * for the get pointer to catch up with the put pointer. Returns 0 when
 * the FIFO is empty, -ETIMEDOUT otherwise (logging the FIFO registers).
 */
static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms)
{
struct ps3vram_priv *priv = mtd->priv;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

do {
if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET])
return 0;
msleep(1);
} while (time_before(jiffies, timeout));

dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__,
__LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET],
priv->ctrl[CTRL_TOP]);

return -ETIMEDOUT;
}
/* Append one 32-bit word at the current FIFO write position. */
static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data)
{
	u32 *slot = priv->fifo_ptr;

	*slot = data;
	priv->fifo_ptr = slot + 1;
}
/* Emit a FIFO method header: data-word count, subchannel, and method tag. */
static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan,
			       u32 tag, u32 size)
{
	u32 header = (size << 18) | (chan << 13) | tag;

	ps3vram_out_ring(priv, header);
}
/*
 * Wrap the FIFO write pointer back to the ring base. Called from
 * ps3vram_fire_ring() with ps3_gpu_mutex held, after the FIFO has been
 * drained.
 */
static void ps3vram_rewind_ring(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
u64 status;

/* 0x20000000 | offset — presumably a FIFO jump command back to the start. */
ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));

priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;

/* asking the HV for a blit will kick the fifo */
status = lv1_gpu_context_attribute(priv->context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
0, 0, 0, 0);
if (status)
dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
__func__, __LINE__);

priv->fifo_ptr = priv->fifo_base;
}
/*
 * Publish everything queued via ps3vram_out_ring() to the GPU: advance
 * the put pointer past the queued words and kick the FIFO with a blit
 * attribute call. When less than 1 KiB of headroom remains, drain the
 * FIFO and rewind it. ps3_gpu_mutex serializes this against other GPU
 * users.
 */
static void ps3vram_fire_ring(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
u64 status;

mutex_lock(&ps3_gpu_mutex);

priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET +
(priv->fifo_ptr - priv->fifo_base) * sizeof(u32);

/* asking the HV for a blit will kick the fifo */
status = lv1_gpu_context_attribute(priv->context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
0, 0, 0, 0);
if (status)
dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
__func__, __LINE__);

if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
FIFO_SIZE - 1024) {
dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__,
__LINE__);
ps3vram_wait_ring(mtd, 200);
ps3vram_rewind_ring(mtd);
}

mutex_unlock(&ps3_gpu_mutex);
}
/*
 * Bind DMA objects to the upload and download subchannels so that the
 * memory-to-memory transfers in ps3vram_upload()/ps3vram_download()
 * can reference system RAM and video RAM. NOTE(review): the magic
 * object handles (0x31337303, 0x3137c0de) and DMA instances (0xfeed000x)
 * are taken as-is; presumably set up elsewhere during device probe.
 */
static void ps3vram_bind(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;

ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x31337303);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */

ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x3137c0de);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */

ps3vram_fire_ring(mtd);
}
static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset,
unsigned int dst_offset, int len, int count)
{
struct ps3vram_priv *priv = mtd->priv;
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, XDR_IOIF + src_offset);
ps3vram_out_ring(priv, dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(mtd);
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(mtd);
if (ps3vram_notifier_wait(mtd, 200) < 0) {
dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
__LINE__);
return -1;
}
return 0;
}
static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset,
unsigned int dst_offset, int len, int count)
{
struct ps3vram_priv *priv = mtd->priv;
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, src_offset);
ps3vram_out_ring(priv, XDR_IOIF + dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(mtd);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(mtd);
if (ps3vram_notifier_wait(mtd, 200) < 0) {
dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
__LINE__);
return -1;
}
return 0;
}
/*
 * Write cache slot @entry back to video RAM if it is dirty, then clear
 * its dirty flag. The address/present tag is left intact. An upload
 * failure is only logged; the slot is still marked clean.
 */
static void ps3vram_cache_evict(struct mtd_info *mtd, int entry)
{
struct ps3vram_priv *priv = mtd->priv;
struct ps3vram_cache *cache = &priv->cache;

if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) {
dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__,
__LINE__, entry, cache->tags[entry].address);
/* Upload from this slot's area in the XDR buffer to VRAM. */
if (ps3vram_upload(mtd,
CACHE_OFFSET + entry * cache->page_size,
cache->tags[entry].address,
DMA_PAGE_SIZE,
cache->page_size / DMA_PAGE_SIZE) < 0) {
dev_dbg(priv->dev, "%s:%d: failed to upload from "
"0x%x to 0x%x size 0x%x\n", __func__, __LINE__,
entry * cache->page_size,
cache->tags[entry].address, cache->page_size);
}
cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
}
}
/*
 * ps3vram_cache_load - Fill a cache entry with a page from video RAM.
 * @entry: cache slot to fill
 * @address: page-aligned DDR address to fetch
 *
 * DMAs one cache page from GPU memory into the XDR staging area and
 * marks the entry present.  A download failure is logged but the tag is
 * still updated, so the caller may see stale data in that (unlikely)
 * case.
 */
static void ps3vram_cache_load(struct mtd_info *mtd, int entry,
			       unsigned int address)
{
	struct ps3vram_priv *priv = mtd->priv;
	struct ps3vram_cache *cache = &priv->cache;

	dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__,
		entry, address);

	if (ps3vram_download(mtd,
			     address,
			     CACHE_OFFSET + entry * cache->page_size,
			     DMA_PAGE_SIZE,
			     cache->page_size / DMA_PAGE_SIZE) < 0) {
		dev_err(priv->dev, "%s:%d: failed to download from "
			"0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address,
			entry * cache->page_size, cache->page_size);
	}

	cache->tags[entry].address = address;
	cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
}
/*
 * ps3vram_cache_flush - Write back all dirty pages and invalidate the cache.
 */
static void ps3vram_cache_flush(struct mtd_info *mtd)
{
	struct ps3vram_priv *priv = mtd->priv;
	struct ps3vram_cache *cache = &priv->cache;
	int entry;

	dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__);

	/* Evict writes back dirty data; clearing flags drops the entry. */
	for (entry = 0; entry < cache->page_count; entry++) {
		ps3vram_cache_evict(mtd, entry);
		cache->tags[entry].flags = 0;
	}
}
/*
 * ps3vram_cache_match - Map a device address to a cache entry.
 *
 * Performs a fully-associative lookup; on a miss a pseudo-random victim
 * is evicted and the containing page is loaded.  Caller must hold
 * priv->lock.  Returns the index of the entry holding the page.
 */
static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address)
{
	struct ps3vram_priv *priv = mtd->priv;
	struct ps3vram_cache *cache = &priv->cache;
	unsigned int base, offset;
	int entry;
	static int counter;

	/* Split the address into page base and in-page offset. */
	offset = (unsigned int) (address & (cache->page_size - 1));
	base = (unsigned int) (address - offset);

	/* fully associative check */
	for (entry = 0; entry < cache->page_count; entry++) {
		if (!(cache->tags[entry].flags & CACHE_PAGE_PRESENT))
			continue;
		if (cache->tags[entry].address == base) {
			dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n",
				__func__, __LINE__, entry,
				cache->tags[entry].address);
			return entry;
		}
	}

	/* choose a random entry */
	entry = (jiffies + (counter++)) % cache->page_count;
	dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__,
		entry);

	ps3vram_cache_evict(mtd, entry);
	ps3vram_cache_load(mtd, entry, base);

	return entry;
}
/*
 * ps3vram_cache_init - Allocate and initialize the page-cache tag array.
 *
 * Returns 0 on success or -ENOMEM if the tag array cannot be allocated.
 */
static int ps3vram_cache_init(struct mtd_info *mtd)
{
	struct ps3vram_priv *priv = mtd->priv;

	priv->cache.page_count = CACHE_PAGE_COUNT;
	priv->cache.page_size = CACHE_PAGE_SIZE;
	/* kcalloc zeroes the array and checks count * size for overflow,
	 * unlike the open-coded kzalloc(sizeof(...) * count). */
	priv->cache.tags = kcalloc(CACHE_PAGE_COUNT,
				   sizeof(struct ps3vram_tag),
				   GFP_KERNEL);
	if (priv->cache.tags == NULL) {
		dev_err(priv->dev, "%s:%d: could not allocate cache tags\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n",
		CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);

	return 0;
}
/*
 * ps3vram_cache_cleanup - Flush all dirty pages and free the tag array.
 */
static void ps3vram_cache_cleanup(struct mtd_info *mtd)
{
	struct ps3vram_priv *priv = mtd->priv;

	ps3vram_cache_flush(mtd);
	kfree(priv->cache.tags);
}
/*
 * ps3vram_erase - MTD erase callback: set the region to 0xFF.
 *
 * Flushes the page cache first so no dirty data is written back over
 * the freshly erased region.  Returns 0 on success or -EINVAL if the
 * requested range lies outside the device.
 */
static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct ps3vram_priv *priv = mtd->priv;

	/* Overflow-safe bounds check: the original "addr + len > size"
	 * form could wrap and accept an out-of-range request. */
	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;

	mutex_lock(&priv->lock);

	ps3vram_cache_flush(mtd);

	/* Set bytes to 0xFF */
	memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len);

	mutex_unlock(&priv->lock);

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
/*
 * ps3vram_read - MTD read callback: copy from video RAM via the page cache.
 * @from: device offset to read from
 * @len: requested byte count
 * @retlen: out parameter, set to the number of bytes actually copied
 * @buf: destination buffer
 *
 * Walks the request one cache page at a time: ps3vram_cache_match()
 * DMAs the containing DDR page into the XDR buffer (under priv->lock),
 * then the relevant slice is memcpy'd out.  Reads extending past the
 * end of the device are silently truncated.  Returns 0 on success or
 * -EINVAL if @from is beyond the device.
 */
static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct ps3vram_priv *priv = mtd->priv;
	unsigned int cached, count;

	dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__,
		(unsigned int)from, len);

	if (from >= mtd->size)
		return -EINVAL;
	if (len > mtd->size - from)
		len = mtd->size - from;

	/* Copy from vram to buf */
	count = len;
	while (count) {
		unsigned int offset, avail;
		unsigned int entry;

		/* Position within the cache page and bytes left in it. */
		offset = (unsigned int) (from & (priv->cache.page_size - 1));
		avail = priv->cache.page_size - offset;

		mutex_lock(&priv->lock);

		entry = ps3vram_cache_match(mtd, from);
		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;

		dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x "
			"avail=%08x count=%08x\n", __func__, __LINE__,
			(unsigned int)from, cached, offset, avail, count);

		/* never copy past the end of the current cache page */
		if (avail > count)
			avail = count;
		memcpy(buf, priv->xdr_buf + cached, avail);

		mutex_unlock(&priv->lock);

		buf += avail;
		count -= avail;
		from += avail;
	}

	*retlen = len;
	return 0;
}
/*
 * ps3vram_write - MTD write callback: copy into video RAM via the page cache.
 * @to: device offset to write to
 * @len: requested byte count
 * @retlen: out parameter, set to the number of bytes actually written
 * @buf: source buffer
 *
 * Mirror of ps3vram_read(): data is written into the cached page in the
 * XDR buffer and the entry is marked dirty; the actual DMA back to DDR
 * happens on eviction or flush.  Writes extending past the end of the
 * device are silently truncated.  Returns 0 on success or -EINVAL if
 * @to is beyond the device.
 */
static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct ps3vram_priv *priv = mtd->priv;
	unsigned int cached, count;

	if (to >= mtd->size)
		return -EINVAL;
	if (len > mtd->size - to)
		len = mtd->size - to;

	/* Copy from buf to vram */
	count = len;
	while (count) {
		unsigned int offset, avail;
		unsigned int entry;

		/* Position within the cache page and space left in it. */
		offset = (unsigned int) (to & (priv->cache.page_size - 1));
		avail = priv->cache.page_size - offset;

		mutex_lock(&priv->lock);

		entry = ps3vram_cache_match(mtd, to);
		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;

		dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x "
			"avail=%08x count=%08x\n", __func__, __LINE__,
			(unsigned int)to, cached, offset, avail, count);

		/* never write past the end of the current cache page */
		if (avail > count)
			avail = count;
		memcpy(priv->xdr_buf + cached, buf, avail);

		/* defer the DMA write-back until eviction */
		priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;

		mutex_unlock(&priv->lock);

		buf += avail;
		count -= avail;
		to += avail;
	}

	*retlen = len;
	return 0;
}
/*
 * ps3vram_probe - Set up GPU video RAM as an MTD device.
 *
 * Allocates the XDR DMA staging buffer, opens the GPU HV device,
 * reserves DDR video memory and a GPU context from the hypervisor,
 * maps the control and report areas, brings up the FIFO ring and the
 * page cache, and registers the MTD device.  Failure paths unwind in
 * reverse order through the goto chain at the bottom.
 *
 * Fixes over the previous version:
 *  - a failed ps3_open_hv_device() no longer jumps to out_close_gpu
 *    (which closed a device that was never opened);
 *  - a failed lv1_gpu_memory_allocate() now closes the HV device that
 *    *was* opened instead of skipping it;
 *  - kzalloc() failure returns -ENOMEM, not a stale -EIO;
 *  - the ps3vram_cache_init() return value is propagated.
 */
static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
{
	struct ps3vram_priv *priv;
	int status;
	u64 ddr_lpar;
	u64 ctrl_lpar;
	u64 info_lpar;
	u64 reports_lpar;
	u64 ddr_size;
	u64 reports_size;
	int ret;
	char *rest;

	ret = -ENOMEM;
	ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL);
	if (!ps3vram_mtd.priv)
		goto out;

	priv = ps3vram_mtd.priv;
	mutex_init(&priv->lock);
	priv->dev = &dev->core;

	/* Allocate XDR buffer (1MiB aligned) */
	priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
						 get_order(XDR_BUF_SIZE));
	if (priv->xdr_buf == NULL) {
		dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		goto out_free_priv;
	}

	/* Put FIFO at beginning of XDR buffer */
	priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET);
	priv->fifo_ptr = priv->fifo_base;

	/* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */
	if (ps3_open_hv_device(dev)) {
		dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
			__func__, __LINE__);
		ret = -EAGAIN;
		/* the device was never opened; do not unwind through
		 * out_close_gpu */
		goto out_free_xdr_buf;
	}

	/* Request memory */
	status = -1;
	ddr_size = memparse(size, &rest);
	if (*rest == '-')
		ddr_size -= ps3fb_videomemory.size;
	ddr_size = ALIGN(ddr_size, 1024*1024);
	if (!ddr_size) {	/* ddr_size is unsigned; "<= 0" was misleading */
		dev_err(&dev->core, "%s:%d: specified size is too small\n",
			__func__, __LINE__);
		ret = -EINVAL;
		goto out_close_gpu;
	}

	/* Retry with progressively smaller sizes until the HV accepts one. */
	while (ddr_size) {
		status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0,
						 &priv->memory_handle,
						 &ddr_lpar);
		if (!status)
			break;
		ddr_size -= 1024*1024;
	}
	if (status || !ddr_size) {
		dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		/* the HV device is open by now, so close it on unwind */
		goto out_close_gpu;
	}

	/* Request context */
	status = lv1_gpu_context_allocate(priv->memory_handle,
					  0,
					  &priv->context_handle,
					  &ctrl_lpar,
					  &info_lpar,
					  &reports_lpar,
					  &reports_size);
	if (status) {
		dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		goto out_free_memory;
	}

	/* Map XDR buffer to RSX */
	status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
				       ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
				       XDR_BUF_SIZE, 0);
	if (status) {
		dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		goto out_free_context;
	}

	priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
	if (!priv->ddr_base) {
		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
			__LINE__);
		ret = -ENOMEM;
		goto out_free_context;
	}

	priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
	if (!priv->ctrl) {
		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
			__LINE__);
		ret = -ENOMEM;
		goto out_unmap_vram;
	}

	priv->reports = ioremap(reports_lpar, reports_size);
	if (!priv->reports) {
		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
			__LINE__);
		ret = -ENOMEM;
		goto out_unmap_ctrl;
	}

	mutex_lock(&ps3_gpu_mutex);
	ps3vram_init_ring(&ps3vram_mtd);
	mutex_unlock(&ps3_gpu_mutex);

	ps3vram_mtd.name = "ps3vram";
	ps3vram_mtd.size = ddr_size;
	ps3vram_mtd.flags = MTD_CAP_RAM;
	ps3vram_mtd.erase = ps3vram_erase;
	ps3vram_mtd.point = NULL;
	ps3vram_mtd.unpoint = NULL;
	ps3vram_mtd.read = ps3vram_read;
	ps3vram_mtd.write = ps3vram_write;
	ps3vram_mtd.owner = THIS_MODULE;
	ps3vram_mtd.type = MTD_RAM;
	ps3vram_mtd.erasesize = CACHE_PAGE_SIZE;
	ps3vram_mtd.writesize = 1;

	ps3vram_bind(&ps3vram_mtd);

	mutex_lock(&ps3_gpu_mutex);
	ret = ps3vram_wait_ring(&ps3vram_mtd, 100);
	mutex_unlock(&ps3_gpu_mutex);
	if (ret < 0) {
		dev_err(&dev->core, "%s:%d: failed to initialize channels\n",
			__func__, __LINE__);
		ret = -ETIMEDOUT;
		goto out_unmap_reports;
	}

	/* propagate cache-setup failure instead of ignoring it */
	ret = ps3vram_cache_init(&ps3vram_mtd);
	if (ret < 0)
		goto out_unmap_reports;

	if (add_mtd_device(&ps3vram_mtd)) {
		dev_err(&dev->core, "%s:%d: add_mtd_device failed\n",
			__func__, __LINE__);
		ret = -EAGAIN;
		goto out_cache_cleanup;
	}

	dev_info(&dev->core, "reserved %u MiB of gpu memory\n",
		(unsigned int)(ddr_size / 1024 / 1024));

	return 0;

out_cache_cleanup:
	ps3vram_cache_cleanup(&ps3vram_mtd);
out_unmap_reports:
	iounmap(priv->reports);
out_unmap_ctrl:
	iounmap(priv->ctrl);
out_unmap_vram:
	iounmap(priv->ddr_base);
out_free_context:
	lv1_gpu_context_free(priv->context_handle);
out_free_memory:
	lv1_gpu_memory_free(priv->memory_handle);
out_close_gpu:
	ps3_close_hv_device(dev);
out_free_xdr_buf:
	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
out_free_priv:
	kfree(ps3vram_mtd.priv);
	ps3vram_mtd.priv = NULL;
out:
	return ret;
}
/*
 * ps3vram_shutdown - Tear down the MTD device and release GPU resources.
 *
 * Registered as both .remove and .shutdown.  Flushes dirty cache pages
 * back to video RAM (via ps3vram_cache_cleanup) before unmapping and
 * freeing everything in reverse order of ps3vram_probe().
 */
static int ps3vram_shutdown(struct ps3_system_bus_device *dev)
{
	struct ps3vram_priv *priv;

	priv = ps3vram_mtd.priv;

	del_mtd_device(&ps3vram_mtd);
	ps3vram_cache_cleanup(&ps3vram_mtd);
	iounmap(priv->reports);
	iounmap(priv->ctrl);
	iounmap(priv->ddr_base);
	lv1_gpu_context_free(priv->context_handle);
	lv1_gpu_memory_free(priv->memory_handle);
	ps3_close_hv_device(dev);
	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
	kfree(priv);
	return 0;
}
/* PS3 system bus glue: binds to the GPU "ramdisk" sub-device. */
static struct ps3_system_bus_driver ps3vram_driver = {
	.match_id	= PS3_MATCH_ID_GPU,
	.match_sub_id	= PS3_MATCH_SUB_ID_GPU_RAMDISK,
	.core.name	= DEVICE_NAME,
	.core.owner	= THIS_MODULE,
	.probe		= ps3vram_probe,
	.remove		= ps3vram_shutdown,	/* same teardown for both */
	.shutdown	= ps3vram_shutdown,
};
/* Module entry point: register with the PS3 system bus. */
static int __init ps3vram_init(void)
{
	return ps3_system_bus_driver_register(&ps3vram_driver);
}
/* Module exit point: unregister from the PS3 system bus. */
static void __exit ps3vram_exit(void)
{
	ps3_system_bus_driver_unregister(&ps3vram_driver);
}
module_init(ps3vram_init);
module_exit(ps3vram_exit);

/* Module metadata; the alias lets udev autoload on the GPU ramdisk id. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jim Paris <jim@jtan.com>");
MODULE_DESCRIPTION("MTD driver for PS3 video RAM");
MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);
......@@ -884,6 +884,7 @@ config SCSI_IBMVSCSI
tristate "IBM Virtual SCSI support"
depends on PPC_PSERIES || PPC_ISERIES
select SCSI_SRP_ATTRS
select VIOPATH if PPC_ISERIES
help
This is the IBM POWER Virtual SCSI Client
......
......@@ -1320,13 +1320,30 @@ config SERIAL_NETX_CONSOLE
config SERIAL_OF_PLATFORM
tristate "Serial port on Open Firmware platform bus"
depends on PPC_OF
depends on SERIAL_8250
depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL
help
If you have a PowerPC based system that has serial ports
on a platform specific bus, you should enable this option.
Currently, only 8250 compatible ports are supported, but
others can easily be added.
config SERIAL_OF_PLATFORM_NWPSERIAL
tristate "NWP serial port driver"
depends on PPC_OF && PPC_DCR
select SERIAL_OF_PLATFORM
select SERIAL_CORE_CONSOLE
select SERIAL_CORE
help
	  This driver supports the NWP serial device found on the Cell
	  network processor.
config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
bool "Console on NWP serial port"
depends on SERIAL_OF_PLATFORM_NWPSERIAL=y
select SERIAL_CORE_CONSOLE
help
Support for Console on the NWP serial ports.
config SERIAL_QE
tristate "Freescale QUICC Engine serial port support"
depends on QUICC_ENGINE
......
......@@ -72,6 +72,7 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
/*
* Serial Port driver for a NWP uart device
*
* Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/nwpserial.h>
#include <asm/prom.h>
#include <asm/dcr.h>
/* Maximum number of NWP serial ports supported by this driver. */
#define NWPSERIAL_NR 2

/* Status bits used with port->ignore_status_mask / read_status_mask. */
#define NWPSERIAL_STATUS_RXVALID 0x1
#define NWPSERIAL_STATUS_TXFULL 0x2

/* Per-port state: serial-core port plus the mapped DCR registers. */
struct nwpserial_port {
	struct uart_port port;
	dcr_host_t dcr_host;	/* DCR window for the UART registers */
	unsigned int ier;	/* shadow of the interrupt-enable register */
	unsigned int mcr;	/* shadow of the modem-control register */
};

/* Serializes port slot allocation in register/unregister. */
static DEFINE_MUTEX(nwpserial_mutex);
static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR];
/*
 * wait_for_bits - Busy-wait until all @bits are set in the LSR.
 *
 * Polls at 1us intervals and gives up after ~10ms; a timeout is not
 * reported to the caller.
 */
static void wait_for_bits(struct nwpserial_port *up, int bits)
{
	unsigned int lsr, countdown = 10000;

	/* Wait up to 10ms for the character(s) to be sent. */
	do {
		lsr = dcr_read(up->dcr_host, UART_LSR);
		if (!--countdown)
			break;
		udelay(1);
	} while ((lsr & bits) != bits);
}
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
/* Console character output: block until the TX holding register is
 * empty, then write one character. */
static void nwpserial_console_putchar(struct uart_port *port, int c)
{
	struct nwpserial_port *up;

	up = container_of(port, struct nwpserial_port, port);

	/* check if tx buffer is full */
	wait_for_bits(up, UART_LSR_THRE);
	dcr_write(up->dcr_host, UART_TX, c);
	up->port.icount.tx++;
}
/*
 * nwpserial_console_write - Emit a console message on the NWP port.
 *
 * Takes the port lock unless an oops is in progress (then only
 * try-locks, so a console write from the crashing context cannot
 * deadlock).  RX interrupts are masked for the duration and restored
 * afterwards.
 */
static void
nwpserial_console_write(struct console *co, const char *s, unsigned int count)
{
	struct nwpserial_port *up = &nwpserial_ports[co->index];
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&up->port.lock, flags);
	else
		spin_lock_irqsave(&up->port.lock, flags);

	/* save and disable interrupt */
	up->ier = dcr_read(up->dcr_host, UART_IER);
	dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI);

	uart_console_write(&up->port, s, count, nwpserial_console_putchar);

	/* wait for transmitter to become empty */
	while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0)
		cpu_relax();

	/* restore interrupt state */
	dcr_write(up->dcr_host, UART_IER, up->ier);

	if (locked)
		spin_unlock_irqrestore(&up->port.lock, flags);
}
/* Forward declaration; the driver is defined below and referenced by
 * the console's .data field. */
static struct uart_driver nwpserial_reg;

/* Console attached to the "ttySQ" device nodes. */
static struct console nwpserial_console = {
	.name	= "ttySQ",
	.write	= nwpserial_console_write,
	.device	= uart_console_device,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,	/* pick the port from the command line / stdout-path */
	.data	= &nwpserial_reg,
};

#define NWPSERIAL_CONSOLE	(&nwpserial_console)
#else
#define NWPSERIAL_CONSOLE	NULL
#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
/**************************************************************************/
/* The port's DCR resources are claimed at registration time; nothing
 * extra to request here. */
static int nwpserial_request_port(struct uart_port *port)
{
	return 0;
}
/* Counterpart of request_port: nothing was claimed, nothing to release. */
static void nwpserial_release_port(struct uart_port *port)
{
	/* N/A */
}
/* Autoconfiguration hook: this driver only ever drives NWP ports. */
static void nwpserial_config_port(struct uart_port *port, int flags)
{
	port->type = PORT_NWPSERIAL;
}
/*
 * nwpserial_interrupt - Shared IRQ handler: drain received characters
 * into the tty flip buffer.
 *
 * Returns IRQ_NONE when the IIR shows this UART was not the source
 * (the line is shared).
 */
static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
{
	struct nwpserial_port *up = dev_id;
	struct tty_struct *tty = up->port.info->port.tty;
	irqreturn_t ret;
	unsigned int iir;
	unsigned char ch;

	spin_lock(&up->port.lock);

	/* check if the uart was the interrupt source. */
	iir = dcr_read(up->dcr_host, UART_IIR);
	if (!iir) {
		ret = IRQ_NONE;
		goto out;
	}

	do {
		up->port.icount.rx++;
		ch = dcr_read(up->dcr_host, UART_RX);
		/* forward unless stop_rx/!CREAD set the ignore mask */
		if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID)
			tty_insert_flip_char(tty, ch, TTY_NORMAL);
		/*
		 * NOTE(review): the loop condition reads UART_RX (not
		 * UART_LSR) and tests UART_LSR_DR on the value read,
		 * consuming a character per iteration.  Presumably the NWP
		 * DCR interface folds status into the RX read -- confirm
		 * against the hardware specification.
		 */
	} while (dcr_read(up->dcr_host, UART_RX) & UART_LSR_DR);

	tty_flip_buffer_push(tty);

	ret = IRQ_HANDLED;
out:
	spin_unlock(&up->port.lock);
	return ret;
}
/*
 * nwpserial_startup - Open hook: bring the port into operation.
 *
 * Disables auto flow control, installs the shared IRQ handler (with
 * @up as dev_id), enables the receive-data interrupt and clears the
 * "ignore RX" mask so incoming characters are forwarded.  Returns 0
 * or the request_irq() error.
 */
static int nwpserial_startup(struct uart_port *port)
{
	struct nwpserial_port *up;
	int err;

	up = container_of(port, struct nwpserial_port, port);

	/* disable flow control by default */
	up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE;
	dcr_write(up->dcr_host, UART_MCR, up->mcr);

	/* register interrupt handler */
	err = request_irq(up->port.irq, nwpserial_interrupt,
		IRQF_SHARED, "nwpserial", up);
	if (err)
		return err;

	/* enable interrupts */
	up->ier = UART_IER_RDI;
	dcr_write(up->dcr_host, UART_IER, up->ier);

	/* enable receiving */
	up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID;

	return 0;
}
/*
 * nwpserial_shutdown - Close hook: quiesce the port and release the IRQ.
 *
 * Masks RX forwarding, disables all UART interrupts, then frees the
 * interrupt line.
 */
static void nwpserial_shutdown(struct uart_port *port)
{
	struct nwpserial_port *up;

	up = container_of(port, struct nwpserial_port, port);

	/* disable receiving */
	up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;

	/* disable interrupts from this port */
	up->ier = 0;
	dcr_write(up->dcr_host, UART_IER, up->ier);

	/* free irq: dev_id must match what request_irq() was given in
	 * nwpserial_startup(), i.e. 'up'.  Passing 'port' only worked by
	 * accident because port is the first member of *up. */
	free_irq(up->port.irq, up);
}
/* Userspace port reconfiguration (TIOCSSERIAL) is not supported. */
static int nwpserial_verify_port(struct uart_port *port,
			struct serial_struct *ser)
{
	return -EINVAL;
}
/* Human-readable port type for /proc/tty and boot messages. */
static const char *nwpserial_type(struct uart_port *port)
{
	if (port->type != PORT_NWPSERIAL)
		return NULL;
	return "nwpserial";
}
/*
 * nwpserial_set_termios - Apply termios settings.
 *
 * The hardware line parameters are not configurable here; only the
 * CREAD flag (ignore received characters) is honoured.  The old
 * hardware settings are copied back so userspace sees what is actually
 * in effect.
 */
static void nwpserial_set_termios(struct uart_port *port,
			struct ktermios *termios, struct ktermios *old)
{
	struct nwpserial_port *up;

	up = container_of(port, struct nwpserial_port, port);

	up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID
				| NWPSERIAL_STATUS_TXFULL;

	up->port.ignore_status_mask = 0;
	/* ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;

	/* Copy back the old hardware settings */
	if (old)
		tty_termios_copy_hw(termios, old);
}
/* Break signalling is not supported by this hardware interface. */
static void nwpserial_break_ctl(struct uart_port *port, int ctl)
{
	/* N/A */
}
/* No modem-status interrupts to enable on this port. */
static void nwpserial_enable_ms(struct uart_port *port)
{
	/* N/A */
}
/* Stop forwarding received characters; the IRQ handler keeps draining
 * the hardware but discards the data while this mask is set. */
static void nwpserial_stop_rx(struct uart_port *port)
{
	struct nwpserial_port *up;

	up = container_of(port, struct nwpserial_port, port);

	/* don't forward any more data (like !CREAD) */
	up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID;
}
/* Blocking single-character transmit used by start_tx. */
static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c)
{
	/* check if tx buffer is full */
	wait_for_bits(up, UART_LSR_THRE);
	dcr_write(up->dcr_host, UART_TX, c);
	up->port.icount.tx++;
}
/*
 * nwpserial_start_tx - Synchronously drain the transmit buffer.
 *
 * There is no TX interrupt path here: the pending XON/XOFF character
 * (if any) and then the whole circular buffer are pushed out with
 * blocking writes.
 */
static void nwpserial_start_tx(struct uart_port *port)
{
	struct nwpserial_port *up =
		container_of(port, struct nwpserial_port, port);
	struct circ_buf *xmit = &up->port.info->xmit;

	/* A pending flow-control character goes out first. */
	if (port->x_char) {
		nwpserial_putchar(up, port->x_char);
		port->x_char = 0;
	}

	while (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
		nwpserial_putchar(up, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
	}
}
/* No modem-control lines are exposed; report none asserted. */
static unsigned int nwpserial_get_mctrl(struct uart_port *port)
{
	return 0;
}
/* No modem-control lines to set on this hardware. */
static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* N/A */
}
/* Transmission is synchronous (see nwpserial_start_tx); there is no
 * in-flight TX to stop. */
static void nwpserial_stop_tx(struct uart_port *port)
{
	/* N/A */
}
/* Report whether the transmitter (shift register included) is idle. */
static unsigned int nwpserial_tx_empty(struct uart_port *port)
{
	unsigned long flags;
	unsigned int lsr;
	struct nwpserial_port *up =
		container_of(port, struct nwpserial_port, port);

	spin_lock_irqsave(&up->port.lock, flags);
	lsr = dcr_read(up->dcr_host, UART_LSR);
	spin_unlock_irqrestore(&up->port.lock, flags);

	return (lsr & UART_LSR_TEMT) ? TIOCSER_TEMT : 0;
}
/* uart_ops vtable wiring the NWP port into the serial core. */
static struct uart_ops nwpserial_pops = {
	.tx_empty     = nwpserial_tx_empty,
	.set_mctrl    = nwpserial_set_mctrl,
	.get_mctrl    = nwpserial_get_mctrl,
	.stop_tx      = nwpserial_stop_tx,
	.start_tx     = nwpserial_start_tx,
	.stop_rx      = nwpserial_stop_rx,
	.enable_ms    = nwpserial_enable_ms,
	.break_ctl    = nwpserial_break_ctl,
	.startup      = nwpserial_startup,
	.shutdown     = nwpserial_shutdown,
	.set_termios  = nwpserial_set_termios,
	.type         = nwpserial_type,
	.release_port = nwpserial_release_port,
	.request_port = nwpserial_request_port,
	.config_port  = nwpserial_config_port,
	.verify_port  = nwpserial_verify_port,
};
/* Serial-core driver: up to NWPSERIAL_NR "ttySQ" devices on the TTY
 * major, minors starting at 68. */
static struct uart_driver nwpserial_reg = {
	.owner       = THIS_MODULE,
	.driver_name = "nwpserial",
	.dev_name    = "ttySQ",
	.major       = TTY_MAJOR,
	.minor       = 68,
	.nr          = NWPSERIAL_NR,
	.cons        = NWPSERIAL_CONSOLE,
};
/*
 * nwpserial_register_port - Attach an OF-probed uart_port to a NWP slot.
 * @port: template port filled in by the of_serial platform driver
 *
 * Reuses an existing slot already bound to the same DCR base (e.g. set
 * up earlier by the console), otherwise claims a free slot, maps the
 * DCR window and adds the port to the serial core.
 *
 * Returns the allocated line number on success or a negative value on
 * failure.  Fixes over the previous version: the uart_register_driver()
 * return value is checked (and registration is retried on a later call
 * if it failed), and the DCR-map error path returns a proper errno.
 */
int nwpserial_register_port(struct uart_port *port)
{
	struct nwpserial_port *up = NULL;
	int ret = -1;
	int i;
	static int first = 1;
	int dcr_len;
	int dcr_base;
	struct device_node *dn;

	mutex_lock(&nwpserial_mutex);

	dn = to_of_device(port->dev)->node;
	if (dn == NULL)
		goto out;

	/* get dcr base. */
	dcr_base = dcr_resource_start(dn, 0);

	/* find matching entry */
	for (i = 0; i < NWPSERIAL_NR; i++)
		if (nwpserial_ports[i].port.iobase == dcr_base) {
			up = &nwpserial_ports[i];
			break;
		}

	/* we didn't find a matching entry, search for a free port */
	if (up == NULL)
		for (i = 0; i < NWPSERIAL_NR; i++)
			if (nwpserial_ports[i].port.type == PORT_UNKNOWN &&
				nwpserial_ports[i].port.iobase == 0) {
				up = &nwpserial_ports[i];
				break;
			}

	if (up == NULL) {
		ret = -EBUSY;
		goto out;
	}

	if (first) {
		/* register the driver once; leave 'first' set on failure so
		 * a later probe can retry */
		ret = uart_register_driver(&nwpserial_reg);
		if (ret)
			goto out;
		first = 0;
	}

	up->port.membase      = port->membase;
	up->port.irq          = port->irq;
	up->port.uartclk      = port->uartclk;
	up->port.fifosize     = port->fifosize;
	up->port.regshift     = port->regshift;
	up->port.iotype       = port->iotype;
	up->port.flags        = port->flags;
	up->port.mapbase      = port->mapbase;
	up->port.private_data = port->private_data;

	if (port->dev)
		up->port.dev = port->dev;

	/* Only initialize the slot if it is not already bound (console). */
	if (up->port.iobase != dcr_base) {
		up->port.ops = &nwpserial_pops;
		up->port.fifosize = 16;

		spin_lock_init(&up->port.lock);

		up->port.iobase = dcr_base;
		dcr_len = dcr_resource_len(dn, 0);

		up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
		if (!DCR_MAP_OK(up->dcr_host)) {
			printk(KERN_ERR
			       "Cannot map DCR resources for NWPSERIAL\n");
			ret = -ENOMEM;
			goto out;
		}
	}

	ret = uart_add_one_port(&nwpserial_reg, &up->port);
	if (ret == 0)
		ret = up->port.line;

out:
	mutex_unlock(&nwpserial_mutex);

	return ret;
}
EXPORT_SYMBOL(nwpserial_register_port);
/*
 * nwpserial_unregister_port - Detach the port registered on @line.
 *
 * Marks the slot PORT_UNKNOWN so it can be reclaimed by a later
 * nwpserial_register_port() call.
 */
void nwpserial_unregister_port(int line)
{
	struct nwpserial_port *up = &nwpserial_ports[line];
	mutex_lock(&nwpserial_mutex);
	uart_remove_one_port(&nwpserial_reg, &up->port);

	up->port.type = PORT_UNKNOWN;

	mutex_unlock(&nwpserial_mutex);
}
EXPORT_SYMBOL(nwpserial_unregister_port);
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
/*
 * nwpserial_console_init - Early console setup on the stdout-path port.
 *
 * Finds a free port slot, resolves the firmware's linux,stdout-path
 * node, maps its DCR window and registers the console.  Returns 0 on
 * success or -1 on failure (return value of a console initcall is not
 * acted upon).
 *
 * Fixes over the previous version: the device_node reference taken by
 * of_find_node_by_path() is dropped with of_node_put(), and the error
 * printk carries a log level and newline.
 */
static int __init nwpserial_console_init(void)
{
	struct nwpserial_port *up = NULL;
	struct device_node *dn;
	const char *name;
	int dcr_base;
	int dcr_len;
	int i;

	/* search for a free port */
	for (i = 0; i < NWPSERIAL_NR; i++)
		if (nwpserial_ports[i].port.type == PORT_UNKNOWN) {
			up = &nwpserial_ports[i];
			break;
		}
	if (up == NULL)
		return -1;

	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (name == NULL)
		return -1;

	dn = of_find_node_by_path(name);
	if (!dn)
		return -1;

	spin_lock_init(&up->port.lock);
	up->port.ops = &nwpserial_pops;
	up->port.type = PORT_NWPSERIAL;
	up->port.fifosize = 16;

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);
	up->port.iobase = dcr_base;

	up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	/* drop the reference taken by of_find_node_by_path() */
	of_node_put(dn);
	if (!DCR_MAP_OK(up->dcr_host)) {
		printk(KERN_ERR "Cannot map DCR resources for SERIAL\n");
		return -1;
	}
	register_console(&nwpserial_console);

	return 0;
}
console_initcall(nwpserial_console_init);
#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
......@@ -14,6 +14,7 @@
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>
#include <asm/prom.h>
......@@ -99,9 +100,16 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
goto out;
switch (port_type) {
#ifdef CONFIG_SERIAL_8250
case PORT_8250 ... PORT_MAX_8250:
ret = serial8250_register_port(&port);
break;
#endif
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
case PORT_NWPSERIAL:
ret = nwpserial_register_port(&port);
break;
#endif
default:
/* need to add code for these */
case PORT_UNKNOWN:
......@@ -129,9 +137,16 @@ static int of_platform_serial_remove(struct of_device *ofdev)
{
struct of_serial_info *info = ofdev->dev.driver_data;
switch (info->type) {
#ifdef CONFIG_SERIAL_8250
case PORT_8250 ... PORT_MAX_8250:
serial8250_unregister_port(info->line);
break;
#endif
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
case PORT_NWPSERIAL:
nwpserial_unregister_port(info->line);
break;
#endif
default:
/* need to add code for these */
break;
......@@ -148,6 +163,10 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
{ .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, },
{ .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, },
{ .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
{ .type = "serial", .compatible = "ibm,qpace-nwp-serial",
.data = (void *)PORT_NWPSERIAL, },
#endif
{ .type = "serial", .data = (void *)PORT_UNKNOWN, },
{ /* end of list */ },
};
......
/*
* Serial Port driver for a NWP uart device
*
* Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#ifndef _NWPSERIAL_H
#define _NWPSERIAL_H

/* Attach an OF-probed uart_port to the NWP driver; returns the
 * allocated line number or a negative value on failure. */
int nwpserial_register_port(struct uart_port *port);

/* Detach the port previously registered on @line. */
void nwpserial_unregister_port(int line);

#endif /* _NWPSERIAL_H */
......@@ -161,6 +161,9 @@
#define PORT_S3C6400 84
/* NWPSERIAL */
#define PORT_NWPSERIAL 85
#ifdef __KERNEL__
#include <linux/compiler.h>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment