Commit e4b9e2aa authored by Andi Kleen, committed by Linus Torvalds

[PATCH] 2.5.21 x86-64 jumbo patch - arch specific changes

Here is the big 2.5.21 x86-64 sync patch. It only touches arch/x86_64
and include/asm-x86_64. It requires a few other changes that I'm sending
in a separate mail.

Changes:
- merge with 2.5.21
- merge from 2.5.21/i386 (new PCI code, new LDT code etc.)
- sync with 2.4-x86_64 tree
- minor updates to 32bit emulation
- better early console, including serial support
- now sets up a dummy PDA during boot to avoid problems
- fix a one-instruction race when reloading GS in the context switch
- remove hardcoded CPU names from the mpparse code
- fix inline assembly for RAID-5 xor (a similar change is needed for i386)
- real per-CPU data support based on a PDA field
- cleanup of offset.c generation requested by Kai: it only puts structure
  offsets into offset.h now
- fix i387 fxsave signal frame problems
- add uname emulation via personality ("linux32"); a small sketch follows
  this list
- new SSE optimized checksum-copy, copy*user, memcpy, clear_page, copy_page
  functions; other tunings/cleanups in checksum and other user memory
  access functions
- check that the exception table is really sorted
- cleanups in page table handling in preparation for non-executable page
  support
- cleanup PDA access to not require offset.h (thanks to Kai for kicking me
  into doing this)
- use long long for u64/s64 to avoid more warnings
- remove CONFIG_ISA
- various bug fixes and other cleanups
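The "linux32" personality deserves a quick illustration. A minimal userspace
sketch (mine, not part of the patch) of what the new sys32_uname() behaviour
looks like from a 32-bit process:

	#include <stdio.h>
	#include <sys/personality.h>
	#include <sys/utsname.h>

	int main(void)
	{
		struct utsname u;

		/* PER_LINUX32 is what a "linux32" wrapper would set before exec */
		personality(PER_LINUX32);
		uname(&u);
		/* under the IA32 emulation this now reports "i386" */
		printf("machine: %s\n", u.machine);
		return 0;
	}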
parent cc9af0c5
@@ -23,7 +23,7 @@
 #
 # early bootup linking needs 32bit. You can either use real 32bit tools
-# here or 64bit tools switch to 32bit mode.
+# here or 64bit tools in 32bit mode.
 #
 IA32_CC := $(CROSS_COMPILE)gcc -m32 -O2 -fomit-frame-pointer -nostdinc -I $(HPATH)
 IA32_LD := $(CROSS_COMPILE)ld -m elf_i386
@@ -41,11 +41,12 @@ LINKFLAGS =-T $(TOPDIR)/arch/x86_64/vmlinux.lds $(LDFLAGS)
 CFLAGS += -mno-red-zone
 CFLAGS += -mcmodel=kernel
 CFLAGS += -pipe
+# this makes reading assembly source easier
 CFLAGS += -fno-reorder-blocks
 # needed for later gcc 3.1
 CFLAGS += -finline-limit=2000
 # needed for earlier gcc 3.1
-CFLAGS += -fno-strength-reduce
+#CFLAGS += -fno-strength-reduce
 #CFLAGS += -g
 # prevent gcc from keeping the stack 16 byte aligned (FIXME)
@@ -63,9 +64,9 @@ SUBDIRS += arch/x86_64/ia32
 CORE_FILES += arch/x86_64/ia32/ia32.o
 endif
-ifdef CONFIG_HOSTFS
-SUBDIRS += arch/x86_64/hostfs
-core-$(CONFIG_HOSTFS) += arch/x86_64/hostfs/hostfs.o
+ifdef CONFIG_PCI
+SUBDIRS += arch/x86_64/pci
+DRIVERS += arch/x86_64/pci/pci.o
 endif
 CORE_FILES += $(core-y)
@@ -77,7 +78,7 @@ vmlinux: arch/x86_64/vmlinux.lds
 .PHONY: zImage bzImage compressed zlilo bzlilo zdisk bzdisk install \
 	clean archclean archmrproper archdep checkoffset
-checkoffset: FORCE
+checkoffset: FORCE include/asm
 	make -C arch/$(ARCH)/tools $(TOPDIR)/include/asm-x86_64/offset.h
 bzImage: checkoffset vmlinux
......
@@ -452,7 +452,7 @@ no_psmouse:
 	cmpw	$0, %cs:realmode_swtch
 	jz	rmodeswtch_normal
-	lcall	%cs:realmode_swtch
+	lcall	*%cs:realmode_swtch
 	jmp	rmodeswtch_end
......
@@ -437,6 +437,7 @@ setalias:
 # Setting of user mode (AX=mode ID) => CF=success
 mode_set:
+	movw	%ax, %fs:(0x01fa)
 	movw	%ax, %bx
 	cmpb	$0xff, %ah
 	jz	setalias
......
@@ -2,12 +2,17 @@
 # For a description of the syntax of this configuration file,
 # see Documentation/kbuild/config-language.txt.
 #
+# Note: ISA is disabled and will hopefully never be enabled.
+# If you managed to buy an ISA x86-64 box you'll have to fix all the
+# ISA drivers you need yourself.
+#
 mainmenu_name "Linux Kernel Configuration"
 define_bool CONFIG_X86_64 y
 define_bool CONFIG_X86 y
-define_bool CONFIG_ISA y
+define_bool CONFIG_ISA n
 define_bool CONFIG_SBUS n
 define_bool CONFIG_UID16 y
@@ -22,7 +27,8 @@ source init/Config.in
 mainmenu_option next_comment
 comment 'Processor type and features'
 choice 'Processor family' \
-	"AMD-Hammer CONFIG_MK8" CONFIG_MK8
+	"AMD-Hammer	CONFIG_MK8 \
+	 Generic-x86-64	CONFIG_GENERIC_CPU" AMD-Hammer
 #
 # Define implied options from the CPU selection here
@@ -44,8 +50,10 @@ define_bool CONFIG_X86_LOCAL_APIC y
 #currently broken:
 #bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
 bool 'Symmetric multi-processing support' CONFIG_SMP
-bool 'Preemptible Kernel' CONFIG_PREEMPT
-if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
+if [ "$CONFIG_SMP" = "n" ]; then
+   bool 'Preemptible Kernel' CONFIG_PREEMPT
+fi
+if [ "$CONFIG_SMP" = "y" ]; then
    define_bool CONFIG_HAVE_DEC_LOCK y
 fi
@@ -56,10 +64,18 @@ endmenu
 mainmenu_option next_comment
-comment 'General options'
+comment 'Power management options'
+
+bool 'Power Management support' CONFIG_PM
 source drivers/acpi/Config.in
+endmenu
+
+mainmenu_option next_comment
+comment 'Bus options (PCI etc.)'
 bool 'PCI support' CONFIG_PCI
 if [ "$CONFIG_PCI" = "y" ]; then
 	# x86-64 doesn't support PCI BIOS access from long mode so always go direct.
@@ -77,6 +93,12 @@ else
    define_bool CONFIG_PCMCIA n
 fi
+endmenu
+
+mainmenu_option next_comment
+comment 'Executable file formats / Emulations'
 if [ "$CONFIG_PROC_FS" = "y" ]; then
    define_bool CONFIG_KCORE_ELF y
 fi
@@ -84,8 +106,6 @@ fi
 tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
 tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
-bool 'Power Management support' CONFIG_PM
 bool 'IA32 Emulation' CONFIG_IA32_EMULATION
 endmenu
@@ -94,7 +114,7 @@ source drivers/mtd/Config.in
 source drivers/parport/Config.in
-source drivers/pnp/Config.in
+#source drivers/pnp/Config.in
 source drivers/block/Config.in
@@ -142,9 +162,10 @@ if [ "$CONFIG_NET" = "y" ]; then
    bool 'Network device support' CONFIG_NETDEVICES
    if [ "$CONFIG_NETDEVICES" = "y" ]; then
       source drivers/net/Config.in
-      if [ "$CONFIG_ATM" = "y" ]; then
-         source drivers/atm/Config.in
-      fi
+# ATM seems to be largely 64bit unsafe and also unmaintained - disable it for now.
+#      if [ "$CONFIG_ATM" = "y" ]; then
+#         source drivers/atm/Config.in
+#      fi
    fi
    endmenu
 fi
@@ -155,14 +176,7 @@ source net/irda/Config.in
 source drivers/isdn/Config.in
-mainmenu_option next_comment
-comment 'Old CD-ROM drivers (not SCSI, not IDE)'
-bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
-if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
-   source drivers/cdrom/Config.in
-fi
-endmenu
+# no support for non IDE/SCSI cdroms as they were all ISA only
 #
 # input before char - char/joystick depends on it. As does USB.
......
 #
-# Automatically generated make config: don't edit
+# Automatically generated by make menuconfig: don't edit
 #
 CONFIG_X86_64=y
 CONFIG_X86=y
-CONFIG_ISA=y
+# CONFIG_ISA is not set
 # CONFIG_SBUS is not set
 CONFIG_UID16=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -35,6 +35,7 @@ CONFIG_MODULES=y
 # Processor type and features
 #
 CONFIG_MK8=y
+# CONFIG_GENERIC_CPU is not set
 CONFIG_X86_L1_CACHE_BYTES=64
 CONFIG_X86_L1_CACHE_SHIFT=6
 CONFIG_X86_TSC=y
@@ -47,28 +48,35 @@ CONFIG_X86_CPUID=y
 CONFIG_X86_IO_APIC=y
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_SMP=y
+# CONFIG_PREEMPT is not set
 CONFIG_HAVE_DEC_LOCK=y
 CONFIG_X86_MCE=y
 # CONFIG_X86_MCE_NONFATAL is not set

 #
-# General options
+# Power management options
 #
+# CONFIG_PM is not set

 #
 # ACPI Support
 #
 # CONFIG_ACPI is not set

+#
+# Bus options (PCI etc.)
+#
 CONFIG_PCI=y
 CONFIG_PCI_DIRECT=y
 # CONFIG_PCI_NAMES is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_PCMCIA is not set

+#
+# Executable file formats / Emulations
+#
 CONFIG_KCORE_ELF=y
 CONFIG_BINFMT_ELF=y
 # CONFIG_BINFMT_MISC is not set
-CONFIG_PM=y
 CONFIG_IA32_EMULATION=y

 #
@@ -81,27 +89,22 @@ CONFIG_IA32_EMULATION=y
 #
 # CONFIG_PARPORT is not set

-#
-# Plug and Play configuration
-#
-# CONFIG_PNP is not set
-# CONFIG_ISAPNP is not set
-# CONFIG_PNPBIOS is not set
-
 #
 # Block devices
 #
-# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_FD=y
 # CONFIG_BLK_DEV_XD is not set
 # CONFIG_PARIDE is not set
 # CONFIG_BLK_CPQ_DA is not set
 # CONFIG_BLK_CPQ_CISS_DA is not set
 # CONFIG_CISS_SCSI_TAPE is not set
 # CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_LOOP=y
 # CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_RAM is not set
-# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y

 #
 # Multi-device support (RAID and LVM)
@@ -118,17 +121,19 @@ CONFIG_IA32_EMULATION=y
 #
 # Networking options
 #
-# CONFIG_PACKET is not set
-# CONFIG_NETLINK_DEV is not set
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
 # CONFIG_NETFILTER is not set
-# CONFIG_FILTER is not set
+CONFIG_FILTER=y
 CONFIG_UNIX=y
 CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
+CONFIG_IP_MULTICAST=y
 # CONFIG_IP_ADVANCED_ROUTER is not set
 # CONFIG_IP_PNP is not set
 # CONFIG_NET_IPIP is not set
 # CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
 # CONFIG_ARPD is not set
 # CONFIG_INET_ECN is not set
 # CONFIG_SYN_COOKIES is not set
@@ -136,12 +141,13 @@ CONFIG_INET=y
 # CONFIG_KHTTPD is not set
 # CONFIG_ATM is not set
 # CONFIG_VLAN_8021Q is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set

 #
+# Appletalk devices
 #
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
 # CONFIG_DECNET is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_X25 is not set
@@ -174,10 +180,6 @@ CONFIG_IDE=y
 # ATA and ATAPI Block devices
 #
 CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
 # CONFIG_BLK_DEV_HD_IDE is not set
 # CONFIG_BLK_DEV_HD is not set
 CONFIG_BLK_DEV_IDEDISK=y
@@ -188,10 +190,6 @@ CONFIG_BLK_DEV_IDECD=y
 # CONFIG_BLK_DEV_IDETAPE is not set
 # CONFIG_BLK_DEV_IDEFLOPPY is not set
 # CONFIG_BLK_DEV_IDESCSI is not set
-
-#
-# IDE chipset support
-#
 # CONFIG_BLK_DEV_CMD640 is not set
 # CONFIG_BLK_DEV_CMD640_ENHANCED is not set
 # CONFIG_BLK_DEV_ISAPNP is not set
@@ -199,17 +197,17 @@ CONFIG_BLK_DEV_IDECD=y
 # CONFIG_BLK_DEV_OFFBOARD is not set
 # CONFIG_IDEPCI_SHARE_IRQ is not set
 CONFIG_BLK_DEV_IDEDMA_PCI=y
-CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_PCI_AUTO is not set
 # CONFIG_IDEDMA_ONLYDISK is not set
 CONFIG_BLK_DEV_IDEDMA=y
 # CONFIG_BLK_DEV_IDE_TCQ is not set
 # CONFIG_BLK_DEV_IDE_TCQ_DEFAULT is not set
 # CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
 # CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_AEC62XX_TUNING is not set
+# CONFIG_AEC6280_BURST is not set
 # CONFIG_BLK_DEV_ALI15X3 is not set
 # CONFIG_WDC_ALI15X3 is not set
-# CONFIG_BLK_DEV_AMD74XX is not set
+CONFIG_BLK_DEV_AMD74XX=y
 # CONFIG_BLK_DEV_CMD64X is not set
 # CONFIG_BLK_DEV_CY82C693 is not set
 # CONFIG_BLK_DEV_CS5530 is not set
@@ -226,9 +224,11 @@ CONFIG_BLK_DEV_IDEDMA=y
 # CONFIG_BLK_DEV_SIS5513 is not set
 # CONFIG_BLK_DEV_TRM290 is not set
 # CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
 # CONFIG_IDE_CHIPSETS is not set
 # CONFIG_IDEDMA_IVB is not set
-CONFIG_IDEDMA_AUTO=y
+CONFIG_ATAPI=y
+# CONFIG_IDEDMA_AUTO is not set
 # CONFIG_BLK_DEV_ATARAID is not set
 # CONFIG_BLK_DEV_ATARAID_PDC is not set
 # CONFIG_BLK_DEV_ATARAID_HPT is not set
@@ -236,16 +236,67 @@ CONFIG_IDEDMA_AUTO=y
 #
 # SCSI support
 #
-# CONFIG_SCSI is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_REPORT_LUNS is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_NCR53C8XX is not set
+# CONFIG_SCSI_SYM53C8XX is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set

 #
 # Fusion MPT device support
 #
 # CONFIG_FUSION is not set
+# CONFIG_FUSION_BOOT is not set
+# CONFIG_FUSION_ISENSE is not set
+# CONFIG_FUSION_CTL is not set
+# CONFIG_FUSION_LAN is not set

 #
 # IEEE 1394 (FireWire) support (EXPERIMENTAL)
@@ -255,7 +306,83 @@ CONFIG_IDEDMA_AUTO=y
 #
 # Network device support
 #
-# CONFIG_NETDEVICES is not set
+CONFIG_NETDEVICES=y
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_SUNLANCE is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNBMAC is not set
+# CONFIG_SUNQE is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL1 is not set
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_EL16 is not set
+# CONFIG_ELMC is not set
+# CONFIG_ELMC_II is not set
+CONFIG_VORTEX=y
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_MYRI_SBUS is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_SK98LIN is not set
+CONFIG_TIGON3=y
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set

 #
 # Amateur Radio support
@@ -272,11 +399,6 @@ CONFIG_IDEDMA_AUTO=y
 #
 # CONFIG_ISDN_BOOL is not set
-
-#
-# Old CD-ROM drivers (not SCSI, not IDE)
-#
-# CONFIG_CD_NO_IDESCSI is not set

 #
 # Input device support
 #
@@ -329,7 +451,7 @@ CONFIG_PSMOUSE=y
 # CONFIG_WATCHDOG is not set
 # CONFIG_INTEL_RNG is not set
 # CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
+CONFIG_RTC=y
 # CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
@@ -356,9 +478,11 @@ CONFIG_PSMOUSE=y
 # File systems
 #
 # CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_AUTOFS_FS=y
 # CONFIG_AUTOFS4_FS is not set
-# CONFIG_REISERFS_FS is not set
+CONFIG_REISERFS_FS=y
 # CONFIG_REISERFS_CHECK is not set
 # CONFIG_REISERFS_PROC_INFO is not set
 # CONFIG_ADFS_FS is not set
@@ -366,8 +490,8 @@ CONFIG_PSMOUSE=y
 # CONFIG_AFFS_FS is not set
 # CONFIG_HFS_FS is not set
 # CONFIG_BFS_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
+CONFIG_EXT3_FS=y
+CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
 # CONFIG_FAT_FS is not set
 # CONFIG_MSDOS_FS is not set
@@ -377,7 +501,7 @@ CONFIG_PSMOUSE=y
 # CONFIG_JFFS_FS is not set
 # CONFIG_JFFS2_FS is not set
 # CONFIG_CRAMFS is not set
-# CONFIG_TMPFS is not set
+CONFIG_TMPFS=y
 CONFIG_RAMFS=y
 # CONFIG_ISO9660_FS is not set
 # CONFIG_JOLIET is not set
@@ -410,15 +534,15 @@ CONFIG_EXT2_FS=y
 #
 # CONFIG_CODA_FS is not set
 # CONFIG_INTERMEZZO_FS is not set
-# CONFIG_NFS_FS is not set
+CONFIG_NFS_FS=y
 # CONFIG_NFS_V3 is not set
 # CONFIG_ROOT_NFS is not set
-# CONFIG_NFSD is not set
+CONFIG_NFSD=y
 # CONFIG_NFSD_V3 is not set
 # CONFIG_NFSD_TCP is not set
-# CONFIG_SUNRPC is not set
-# CONFIG_LOCKD is not set
-# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+CONFIG_EXPORTFS=y
 # CONFIG_SMB_FS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_NCPFS_PACKET_SIGNING is not set
@@ -471,7 +595,7 @@ CONFIG_VGA_CONSOLE=y
 #
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_SLAB is not set
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_MAGIC_SYSRQ=y
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_CHECKING is not set
 # CONFIG_INIT_DEBUG is not set
......
@@ -2,11 +2,16 @@
 # Makefile for the ia32 kernel emulation subsystem.
 #
-O_TARGET := ia32.o
-export-objs := ia32_ioctl.o
+USE_STANDARD_AS_RULE := true
+
+export-objs := ia32_ioctl.o sys_ia32.o
+
+all: ia32.o
+
+O_TARGET := ia32.o
 obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
 	ia32_binfmt.o fpu32.o socket32.o ptrace32.o
+clean::
 include $(TOPDIR)/Rules.make
@@ -113,6 +113,7 @@ static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
 	return err;
 }

+#if 0
 static int rw_long(unsigned int fd, unsigned int cmd, unsigned long arg)
 {
 	mm_segment_t old_fs = get_fs();
@@ -128,6 +129,7 @@ static int rw_long(unsigned int fd, unsigned int cmd, unsigned long arg)
 		return -EFAULT;
 	return err;
 }
+#endif

 static int do_ext2_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
 {
@@ -2971,11 +2973,6 @@ static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, struct blkpg_ioc
 	return err;
 }

-static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
-}
-
 /* SuSE extension */
 #ifndef TIOCGDEV
 #define TIOCGDEV _IOR('T',0x32, unsigned int)
@@ -3087,19 +3084,13 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, void *ptr)
 	return err;
 }

-struct ioctl_trans {
-	unsigned long cmd;
-	int (*handler)(unsigned int, unsigned int, unsigned long, struct file * filp);
-	struct ioctl_trans *next;
-};
-
-/* generic function to change a single long put_user to arg to 32bit */
-static int arg2long(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int generic_long_put(unsigned int fd, unsigned int cmd, unsigned long arg)
 {
 	int ret;
 	unsigned long val = 0;
 	mm_segment_t oldseg = get_fs();
 	set_fs(KERNEL_DS);
+	cmd = (cmd & 0xc000ffff) | (sizeof(unsigned long) << _IOC_SIZESHIFT);
 	ret = sys_ioctl(fd, cmd, (unsigned long)&val);
 	set_fs(oldseg);
 	if (!ret || val) {
@@ -3109,6 +3100,29 @@ static int arg2long(unsigned int fd, unsigned int cmd, unsigned long arg)
 	return ret;
 }

+static int generic_long_get(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	unsigned int ival;
+	unsigned long val = 0;
+	mm_segment_t oldseg = get_fs();
+
+	if (get_user(ival, (unsigned int *)arg))
+		return -EFAULT;
+	val = ival;
+	set_fs(KERNEL_DS);
+	cmd = (cmd & 0xc000ffff) | (sizeof(unsigned long) << _IOC_SIZESHIFT);
+	ret = sys_ioctl(fd, cmd, (unsigned long)&val);
+	set_fs(oldseg);
+	return ret;
+}
+
+struct ioctl_trans {
+	unsigned long cmd;
+	int (*handler)(unsigned int, unsigned int, unsigned long, struct file * filp);
+	struct ioctl_trans *next;
+};
+
 #define REF_SYMBOL(handler) if (0) (void)handler;
 #define HANDLE_IOCTL2(cmd,handler) REF_SYMBOL(handler); asm volatile(".quad %c0, " #handler ",0"::"i" (cmd));
 #define HANDLE_IOCTL(cmd,handler) HANDLE_IOCTL2(cmd,handler)
@@ -3316,10 +3330,6 @@ COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+5, int))
 COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+6, int))
 COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+7, int))
 /* Little p (/dev/rtc, /dev/envctrl, etc.) */
-#if 0
-COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
-COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
-#endif
 COMPATIBLE_IOCTL(RTC_AIE_ON)
 COMPATIBLE_IOCTL(RTC_AIE_OFF)
 COMPATIBLE_IOCTL(RTC_UIE_ON)
@@ -3334,10 +3344,14 @@ COMPATIBLE_IOCTL(RTC_RD_TIME)
 COMPATIBLE_IOCTL(RTC_SET_TIME)
 COMPATIBLE_IOCTL(RTC_WKALM_SET)
 COMPATIBLE_IOCTL(RTC_WKALM_RD)
-HANDLE_IOCTL(RTC_IRQP_READ,arg2long)
-COMPATIBLE_IOCTL(RTC_IRQP_SET)
-COMPATIBLE_IOCTL(RTC_EPOCH_READ)
-COMPATIBLE_IOCTL(RTC_EPOCH_SET)
+#define RTC_IRQP_READ32	_IOR('p', 0x0b, unsigned int)	/* Read IRQ rate */
+HANDLE_IOCTL(RTC_IRQP_READ32,generic_long_put)
+#define RTC_IRQP_SET32	_IOW('p', 0x0c, unsigned int)	/* Set IRQ rate */
+HANDLE_IOCTL(RTC_IRQP_SET32,generic_long_get)
+#define RTC_EPOCH_READ32	_IOR('p', 0x0d, unsigned long)	/* Read epoch */
+#define RTC_EPOCH_SET32		_IOW('p', 0x0e, unsigned long)	/* Set epoch */
+HANDLE_IOCTL(RTC_EPOCH_READ32, generic_long_put)
+HANDLE_IOCTL(RTC_EPOCH_SET32, generic_long_get)
 /* Little m */
 COMPATIBLE_IOCTL(MTIOCTOP)
 /* Socket level stuff */
@@ -3605,6 +3619,8 @@ COMPATIBLE_IOCTL(AUTOFS_IOC_FAIL)
 COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC)
 COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER)
 COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE)
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
+HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, generic_long_get);
 /* DEVFS */
 COMPATIBLE_IOCTL(DEVFSDIOC_GET_PROTO_REV)
 COMPATIBLE_IOCTL(DEVFSDIOC_SET_EVENT_MASK)
@@ -3671,14 +3687,6 @@ COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
 COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
 COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
 #endif /* DRM */
-#ifdef CONFIG_AUTOFS_FS
-COMPATIBLE_IOCTL(AUTOFS_IOC_READY);
-COMPATIBLE_IOCTL(AUTOFS_IOC_FAIL);
-COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC);
-COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER);
-COMPATIBLE_IOCTL(AUTOFS_IOC_SETTIMEOUT);
-COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE);
-#endif
 COMPATIBLE_IOCTL(REISERFS_IOC_UNPACK);
 /* serial driver */
 HANDLE_IOCTL(TIOCGSERIAL, serial_struct_ioctl);
@@ -3771,8 +3779,6 @@ HANDLE_IOCTL(CDROMREADALL, cdrom_ioctl_trans)
 HANDLE_IOCTL(CDROM_SEND_PACKET, cdrom_ioctl_trans)
 HANDLE_IOCTL(LOOP_SET_STATUS, loop_status)
 HANDLE_IOCTL(LOOP_GET_STATUS, loop_status)
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
-HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout)
 HANDLE_IOCTL(PIO_FONTX, do_fontx_ioctl)
 HANDLE_IOCTL(GIO_FONTX, do_fontx_ioctl)
 HANDLE_IOCTL(PIO_UNIMAP, do_unimap_ioctl)
......
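A note on the generic_long_get/generic_long_put pair added above: a 32-bit
ioctl number encodes a 4-byte argument size in its size field, so the
translators mask with 0xc000ffff (keeping the direction bits plus type and
number) and splice in sizeof(unsigned long) before calling the native
sys_ioctl() on a kernel-space long. A standalone illustration of that
size-field rewrite (the helper name is mine; the bit layout matches the
_IOC_* macros used by the patch):

	/* an ioctl number is laid out as | dir:2 | size:14 | type:8 | nr:8 | */
	#define _IOC_NRBITS	8
	#define _IOC_TYPEBITS	8
	#define _IOC_SIZESHIFT	(_IOC_NRBITS + _IOC_TYPEBITS)	/* 16 */

	static unsigned int widen_ioctl_size(unsigned int cmd)
	{
		/* 0xc000ffff keeps dir/type/nr and zeroes the old size field */
		return (cmd & 0xc000ffff) | (sizeof(unsigned long) << _IOC_SIZESHIFT);
	}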
@@ -241,7 +241,7 @@ asmlinkage int sys32_sigreturn(struct pt_regs regs)
 	return eax;

 badframe:
-	force_sig(SIGSEGV, current);
+	signal_fault(&regs, frame, "32bit sigreturn");
 	return 0;
 }
@@ -280,7 +280,7 @@ asmlinkage int sys32_rt_sigreturn(struct pt_regs regs)
 	return eax;

 badframe:
-	force_sig(SIGSEGV, current);
+	signal_fault(&regs, frame, "32bit rt sigreturn");
 	return 0;
 }
@@ -420,7 +420,7 @@ void ia32_setup_frame(int sig, struct k_sigaction *ka,
 give_sigsegv:
 	if (sig == SIGSEGV)
 		ka->sa.sa_handler = SIG_DFL;
-	force_sig(SIGSEGV, current);
+	signal_fault(regs,frame,"32bit signal setup");
 }

 void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
@@ -493,6 +493,6 @@ void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 give_sigsegv:
 	if (sig == SIGSEGV)
 		ka->sa.sa_handler = SIG_DFL;
-	force_sig(SIGSEGV, current);
+	signal_fault(regs, frame, "32bit rt signal setup");
 }
@@ -24,9 +24,6 @@
 /*
  * 32bit SYSCALL instruction entry.
- * It'll probably kill you because it destroys your segments.
- * Should coredump here, but the next instruction will likely do
- * that anyways.
  */
 ENTRY(ia32_cstar_target)
 	movq $-ENOSYS,%rax
@@ -117,6 +114,7 @@ ENTRY(ia32_ptregs_common)
 	.data
 	.align 8
+	.globl ia32_sys_call_table
 ia32_sys_call_table:
 	.quad ni_syscall	/* 0 - old "setup" system call*/
 	.quad sys_exit
@@ -143,7 +141,7 @@ ia32_sys_call_table:
 	.quad sys_oldumount	/* old_umount */
 	.quad sys_setuid16
 	.quad sys_getuid16
-	.quad ni_syscall	/* stime */	/* 25 */
+	.quad sys_stime		/* stime */	/* 25 */
 	.quad sys32_ptrace	/* ptrace */
 	.quad sys_alarm		/* XXX sign extension??? */
 	.quad ni_syscall	/* (old)fstat */
@@ -240,7 +238,7 @@ ia32_sys_call_table:
 	.quad stub32_sigreturn
 	.quad stub32_clone	/* 120 */
 	.quad sys_setdomainname
-	.quad sys_newuname
+	.quad sys32_newuname
 	.quad sys_modify_ldt
 	.quad sys32_adjtimex
 	.quad sys32_mprotect	/* 125 */
......
@@ -2579,6 +2579,8 @@ int sys32_uname(struct old_utsname * name)
 	down_read(&uts_sem);
 	err=copy_to_user(name, &system_utsname, sizeof (*name));
 	up_read(&uts_sem);
+	if (current->personality == PER_LINUX32)
+		err |= copy_to_user(&name->machine, "i386", 5);
 	return err?-EFAULT:0;
 }
@@ -3125,3 +3127,6 @@ static int __init ia32_init (void)
 }

 __initcall(ia32_init);
+
+extern unsigned long ia32_sys_call_table[];
+EXPORT_SYMBOL(ia32_sys_call_table);
@@ -12,11 +12,6 @@ obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
 		pci-dma.o x8664_ksyms.o i387.o syscall.o vsyscall.o \
 		setup64.o bluesmoke.o bootflag.o
-ifdef CONFIG_PCI
-obj-y += pci-x86_64.o
-obj-y += pci-pc.o pci-irq.o
-endif
-
 obj-$(CONFIG_MTRR) += mtrr.o
 obj-$(CONFIG_X86_MSR) += msr.o
 obj-$(CONFIG_X86_CPUID) += cpuid.o
......
@@ -48,7 +48,7 @@ static int __init sbf_struct_valid(unsigned long tptr)
 	unsigned int i;
 	struct sbf_boot sb;

-	memcpy_fromio(&sb, tptr, sizeof(sb));
+	memcpy_fromio(&sb, (void *)tptr, sizeof(sb));

 	if(sb.sbf_len != 40 && sb.sbf_len != 39)
 		// 39 on IBM ThinkPad A21m, BIOS version 1.02b (KXET24WW; 2000-12-19).
@@ -238,6 +238,7 @@ static int __init sbf_init(void)
 		rp = (unsigned long)ioremap(rp, 4096);
 		if(rp == 0)
 			continue;
+
 		if(sbf_struct_valid(rp))
 		{
 			/* Found the BOOT table and processed it */
......
@@ -94,15 +94,20 @@ static void early_serial_write(struct console *con, const char *s, unsigned n)
 static __init void early_serial_init(char *opt)
 {
-	static int bases[] = { 0x3f8, 0x2f8 };
 	unsigned char c;
 	unsigned divisor, baud = 38400;
 	char *s, *e;

+	if (*opt == ',')
+		++opt;
+
 	s = strsep(&opt, ",");
 	if (s != NULL) {
 		unsigned port;
-		++s;
+		if (!strncmp(s,"0x",2))
+			early_serial_base = simple_strtoul(s, &e, 16);
+		else {
+			static int bases[] = { 0x3f8, 0x2f8 };
 			if (!strncmp(s,"ttyS",4))
 				s+=4;
 			port = simple_strtoul(s, &e, 10);
@@ -110,12 +115,11 @@ static __init void early_serial_init(char *opt)
 				port = 0;
 			early_serial_base = bases[port];
 		}
+	}

-	c = inb(early_serial_base + LCR);
-	outb(c & ~DLAB, early_serial_base + LCR);
+	outb(0x3, early_serial_base + LCR);	/* 8n1 */
 	outb(0, early_serial_base + IER);	/* no interrupt */
 	outb(0, early_serial_base + FCR);	/* no fifo */
-	outb(0x3, early_serial_base + LCR);	/* 8n1 */
 	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */

 	s = strsep(&opt, ",");
@@ -155,33 +159,55 @@ void early_printk(const char *fmt, ...)
 	va_end(ap);
 }

+static int keep_early;
+
 int __init setup_early_printk(char *opt)
 {
+	char *space;
+	char buf[256];
+
 	if (early_console_initialized)
-		return;
-	early_console_initialized = 1;
-
-	if (!strncmp(opt, "serial", 6)) {
-		early_serial_init(opt+7);
+		return -1;
+
+	strncpy(buf,opt,256);
+	buf[255] = 0;
+	space = strchr(buf, ' ');
+	if (space)
+		*space = 0;
+
+	if (strstr(buf,"keep"))
+		keep_early = 1;
+
+	if (!strncmp(buf, "serial", 6)) {
+		early_serial_init(buf + 6);
 		early_console = &early_serial_console;
-	} else if (!strncmp(opt, "vga", 3))
+	} else if (!strncmp(buf, "vga", 3)) {
 		early_console = &early_vga_console;
-	else
+	} else {
+		early_console = NULL;
 		return -1;
+	}
+	early_console_initialized = 1;
 	register_console(early_console);
 	return 0;
 }

 void __init disable_early_printk(void)
 {
-	if (early_console_initialized) {
+	if (!early_console_initialized || !early_console)
+		return;
+	if (!keep_early) {
+		printk("disabling early console...\n");
 		unregister_console(early_console);
 		early_console_initialized = 0;
+	} else {
+		printk("keeping early console.\n");
 	}
 }

 /* syntax: earlyprintk=vga
 	   earlyprintk=serial[,ttySn[,baudrate]]
+   Append ,keep to not disable it when the real console takes over.
    Only vga or serial at a time, not both.
    Currently only ttyS0 and ttyS1 are supported.
    Interaction with the standard serial driver is not very good.
......
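For reference, with the rewritten parser above the boot command line accepts
forms like the following (the ,keep suffix is the new flag that survives the
handover to the real console, and the 0x form is the new raw-port variant):

	earlyprintk=vga
	earlyprintk=serial,ttyS0,38400
	earlyprintk=serial,0x3f8,38400,keep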
@@ -40,6 +40,7 @@
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
 #include <asm/hw_irq.h>
+#include <asm/errno.h>

 	.code64
@@ -270,8 +271,8 @@ int_very_careful:
 	/* Check for syscall exit trace */
 	bt $TIF_SYSCALL_TRACE,%edx
 	jnc int_signal
-	movq %rsp,%rdi		# &ptregs -> arg1
 	pushq %rdi
+	leaq 8(%rsp),%rdi	# &ptregs -> arg1
 	call syscall_trace
 	popq %rdi
 	btr $TIF_SYSCALL_TRACE,%edi
@@ -563,16 +564,17 @@ ENTRY(kernel_thread)
 	# rdi: flags, rsi: usp, rdx: will be &pt_regs
 	movq %rdx,%rdi
-	orq $CLONE_VM, %rdi
+	orq kernel_thread_flags(%rip), %rdi
 	movq $-1, %rsi
 	movq %rsp, %rdx

 	# clone now
-	call do_fork_FIXME_NOW_RETURNS_TASK_STRUCT
-	# save retval on the stack so it's popped before `ret`
-	movq %rax, RAX(%rsp)
+	call do_fork
+	xorl %edi,%edi
+	cmpq $-1000,%rax
+	cmovb %rdi,%rax
+	movq %rax,RAX(%rsp)

 	/*
 	 * It isn't worth to check for reschedule here,
......
@@ -38,7 +38,11 @@ startup_32:
 	movl %ebx,%ebp	/* Save trampoline flag */

-	/* First check if extended functions are implemented */
+	/* If the CPU doesn't support CPUID this will double fault.
+	 * Unfortunately it is hard to check for CPUID without a stack.
+	 */
+
+	/* Check if extended functions are implemented */
 	movl $0x80000000, %eax
 	cpuid
 	cmpl $0x80000000, %eax
@@ -157,6 +161,17 @@ reach_long64:
 	 */
 	lgdt	pGDT64

+	/*
+	 * Setup up a dummy PDA. this is just for some early bootup code
+	 * that does in_interrupt()
+	 */
+	movl	$MSR_GS_BASE,%ecx
+	movq	$empty_zero_page,%rax
+	movq	%rax,%rdx
+	shrq	$32,%rdx
+	wrmsr
+
+	/* set up data segments. actually 0 would do too */
 	movl $__KERNEL_DS,%eax
 	movl %eax,%ds
 	movl %eax,%ss
......
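The dummy-PDA stanza above splits the 64-bit address of empty_zero_page into
edx:eax because that is the operand convention of wrmsr. For illustration,
the same operation written as a C inline helper (a sketch in the style of the
kernel's wrmsrl(), not code from this patch):

	static inline void wrmsrl(unsigned int msr, unsigned long val)
	{
		asm volatile("wrmsr"
			     : /* no outputs */
			     : "c" (msr),			/* MSR index in ecx */
			       "a" ((unsigned int)val),		/* low 32 bits in eax */
			       "d" ((unsigned int)(val >> 32)));/* high 32 bits in edx */
	}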
@@ -70,16 +70,18 @@ static void __init setup_boot_cpu_data(void)
 	boot_cpu_data.x86_mask = eax & 0xf;
 }

-extern void start_kernel(void), pda_init(int);
+extern void start_kernel(void), pda_init(int), setup_early_printk(char *);

 void __init x86_64_start_kernel(char * real_mode_data)
 {
+	char *s;
+
 	clear_bss();
 	pda_init(0);
 	copy_bootdata(real_mode_data);
+	s = strstr(saved_command_line, "earlyprintk=");
+	if (s != NULL)
+		setup_early_printk(s+12);
 	setup_boot_cpu_data();
 	start_kernel();
 }
@@ -24,8 +24,6 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>

-static struct i387_fxsave_struct init_fpu_env;
-
 /*
  * Called at bootup to set up the initial FPU state that is later cloned
  * into all processes.
@@ -77,6 +75,9 @@ int save_i387(struct _fpstate *buf)
 		bad_user_i387_struct();
 	}

+	if ((unsigned long)buf % 16)
+		printk("save_i387: bad fpstate %p\n",buf);
+
 	if (!tsk->used_math)
 		return 0;
 	tsk->used_math = 0;	/* trigger finit */
......
@@ -12,6 +12,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/kernel_stat.h>
+#include <linux/device.h>

 #include <asm/atomic.h>
 #include <asm/system.h>
@@ -319,6 +320,18 @@ void mask_and_ack_8259A(unsigned int irq)
 	}
 }

+static struct device device_i8259A = {
+	name:	"i8259A",
+	bus_id:	"0020",
+};
+
+static int __init init_8259A_devicefs(void)
+{
+	return register_sys_device(&device_i8259A);
+}
+
+__initcall(init_8259A_devicefs);
+
 void __init init_8259A(int auto_eoi)
 {
 	unsigned long flags;
......
@@ -163,7 +163,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	seq_printf(p, "NMI: ");
 	for (j = 0; j < smp_num_cpus; j++)
-		seq_printf(p, "%10u ", nmi_count(cpu_logical_map(j)));
+		seq_printf(p, "%10u ", cpu_pda[cpu_logical_map(j)].__nmi_count);
 	seq_putc(p, '\n');
 #if CONFIG_X86_LOCAL_APIC
 	seq_printf(p, "LOC: ");
......
@@ -8,11 +8,6 @@
  * This handles calls from both 32bit and 64bit mode.
  */

-/*
- * FIXME:
- * Need to add locking for LAR in load_gs_index.
- */
-
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/string.h>
@@ -20,42 +15,164 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>

 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
+
+void load_gs_index(unsigned gs)
+{
+	int access;
+	struct task_struct *me = current;
+	if (me->mm)
+		read_lock(&me->mm->context.ldtlock);
+	asm volatile("pushf\n\t"
+		     "cli\n\t"
+		     "swapgs\n\t"
+		     "lar %1,%0\n\t"
+		     "jnz 1f\n\t"
+		     "movl %1,%%eax\n\t"
+		     "movl %%eax,%%gs\n\t"
+		     "jmp 2f\n\t"
+		     "1: movl %2,%%gs\n\t"
+		     "2: swapgs\n\t"
+		     "popf" : "=g" (access) : "g" (gs), "r" (0) : "rax");
+	if (me->mm)
+		read_unlock(&me->mm->context.ldtlock);
+}
+
+#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
+static void flush_ldt(void *mm)
+{
+	if (current->mm)
+		load_LDT(&current->mm->context);
+}
+#endif
+
+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+{
+	void *oldldt;
+	void *newldt;
+	int oldsize;
+
+	if (mincount <= pc->size)
+		return 0;
+	oldsize = pc->size;
+	mincount = (mincount+511)&(~511);
+	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+	else
+		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+	if (!newldt)
+		return -ENOMEM;
+
+	if (oldsize)
+		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+	oldldt = pc->ldt;
+	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+	wmb();
+	pc->ldt = newldt;
+	pc->size = mincount;
+	if (reload) {
+		load_LDT(pc);
+#ifdef CONFIG_SMP
+		if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
+			smp_call_function(flush_ldt, 0, 1, 1);
+#endif
+	}
+	wmb();
+	if (oldsize) {
+		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+			vfree(oldldt);
+		else
+			kfree(oldldt);
+	}
+	return 0;
+}
+
+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+{
+	int err = alloc_ldt(new, old->size, 0);
+	if (err < 0) {
+		printk(KERN_WARNING "ldt allocation failed\n");
+		new->size = 0;
+		return err;
+	}
+	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+	return 0;
+}
+
 /*
- * read_ldt() is not really atomic - this is not a problem since
- * synchronization of reads and writes done to the LDT has to be
- * assured by user-space anyway. Writes are atomic, to protect
- * the security checks done on new descriptors.
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
  */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	struct mm_struct * old_mm;
+	int retval = 0;
+
+	init_MUTEX(&mm->context.sem);
+	mm->context.size = 0;
+	old_mm = current->mm;
+	if (old_mm && old_mm->context.size > 0) {
+		down(&old_mm->context.sem);
+		retval = copy_ldt(&mm->context, &old_mm->context);
+		up(&old_mm->context.sem);
+	}
+	rwlock_init(&mm->context.ldtlock);
+	return retval;
+}
+
+/*
+ * No need to lock the MM as we are the last user
+ */
+void release_segments(struct mm_struct *mm)
+{
+	if (mm->context.size) {
+		clear_LDT();
+		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+			vfree(mm->context.ldt);
+		else
+			kfree(mm->context.ldt);
+		mm->context.size = 0;
+	}
+}
+
 static int read_ldt(void * ptr, unsigned long bytecount)
 {
 	int err;
 	unsigned long size;
 	struct mm_struct * mm = current->mm;

-	err = 0;
-	if (!mm->context.segments)
-		goto out;
-
-	size = LDT_ENTRIES*LDT_ENTRY_SIZE;
+	if (!mm->context.size)
+		return 0;
+	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+
+	down(&mm->context.sem);
+	size = mm->context.size*LDT_ENTRY_SIZE;
 	if (size > bytecount)
 		size = bytecount;

-	err = size;
-	if (copy_to_user(ptr, mm->context.segments, size))
+	err = 0;
+	if (copy_to_user(ptr, mm->context.ldt, size))
 		err = -EFAULT;
-out:
-	return err;
+	up(&mm->context.sem);
+	if (err < 0)
+		return err;
+	if (size != bytecount) {
+		/* zero-fill the rest */
+		clear_user(ptr+size, bytecount-size);
+	}
+	return bytecount;
 }

 static int read_default_ldt(void * ptr, unsigned long bytecount)
 {
 	/* Arbitary number */
+	/* x86-64 default LDT is all zeros */
 	if (bytecount > 128)
 		bytecount = 128;
 	if (clear_user(ptr, bytecount))
@@ -94,24 +211,14 @@ static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
 		me->thread.gs = 0;
 		me->thread.fs = 0;

-	/*
-	 * the GDT index of the LDT is allocated dynamically, and is
-	 * limited by MAX_LDT_DESCRIPTORS.
-	 */
-	down_write(&mm->mmap_sem);
-	if (!mm->context.segments) {
-		void * segments = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
-		error = -ENOMEM;
-		if (!segments)
+	down(&mm->context.sem);
+	if (ldt_info.entry_number >= mm->context.size) {
+		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+		if (error < 0)
 			goto out_unlock;
-		memset(segments, 0, LDT_ENTRIES*LDT_ENTRY_SIZE);
-		wmb();
-		mm->context.segments = segments;
-		mm->context.cpuvalid = 1UL << smp_processor_id();
-		load_LDT(mm);
 	}

-	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.segments);
+	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);

 	/* Allow LDTs to be cleared by the user. */
 	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
@@ -146,12 +253,14 @@ static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
 	/* Install the new entry ...  */
 install:
+	write_lock(&mm->context.ldtlock);
 	*lp	= entry_1;
 	*(lp+1)	= entry_2;
+	write_unlock(&mm->context.ldtlock);
 	error = 0;

 out_unlock:
-	up_write(&mm->mmap_sem);
+	up(&mm->context.sem);
 out:
 	return error;
 }
......
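The rewritten ldt.c above allocates the LDT on demand and rounds its size up
in 512-entry steps. A hypothetical userspace probe (mine, not from the patch)
that exercises that path through modify_ldt(2); struct user_desc is the
modern name of the descriptor layout in <asm/ldt.h> (older headers call it
modify_ldt_ldt_s):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/ldt.h>		/* struct user_desc */

	int main(void)
	{
		struct user_desc d;

		memset(&d, 0, sizeof(d));
		d.entry_number = 0;
		d.seg_not_present = 1;	/* base/limit 0: a cleared entry */

		/* func 1 = write: the first write makes alloc_ldt() size the table */
		if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)
			perror("modify_ldt");
		return 0;
	}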
@@ -95,42 +95,6 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 	return sum & 0xFF;
 }

-/*
- * Processor encoding in an MP configuration block
- */
-
-static char __init *mpc_family(int family,int model)
-{
-	static char n[32];
-	static char *model_defs[]=
-	{
-		"80486DX","80486DX",
-		"80486SX","80486DX/2 or 80487",
-		"80486SL","80486SX/2",
-		"Unknown","80486DX/2-WB",
-		"80486DX/4","80486DX/4-WB"
-	};
-
-	switch (family) {
-	case 0x04:
-		if (model < 10)
-			return model_defs[model];
-		break;
-	case 0x05:
-		return("Pentium(tm)");
-	case 0x06:
-		return("Pentium(tm) Pro");
-	case 0x0F:
-		if (model == 0x0F)
-			return("Special controller");
-	}
-	sprintf(n,"Unknown CPU [%d:%d]",family, model);
-	return n;
-}
-
 static void __init MP_processor_info (struct mpc_config_processor *m)
 {
 	int ver;
@@ -138,10 +102,10 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
 	if (!(m->mpc_cpuflag & CPU_ENABLED))
 		return;

-	printk("Processor #%d %s APIC version %d\n",
+	printk("Processor #%d %d:%d APIC version %d\n",
 		m->mpc_apicid,
-		mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
-			   (m->mpc_cpufeature & CPU_MODEL_MASK)>>4),
+		(m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
+		(m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
 		m->mpc_apicver);

 	if (m->mpc_featureflag&(1<<0))
......
@@ -41,6 +41,7 @@
 #include <asm/msr.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
+#include <asm/cpufeature.h>

 /* Note: "err" is handled in a funny way below. Otherwise one version
    of gcc or another breaks. */
@@ -57,7 +58,7 @@ static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
 		"	jmp 2b\n"
 		".previous\n"
 		".section __ex_table,\"a\"\n"
-		"	.align 4\n"
+		"	.align 8\n"
 		"	.quad 1b,3b\n"
 		".previous"
 		: "=&bDS" (err)
@@ -236,7 +237,7 @@ static int msr_open(struct inode *inode, struct file *file)
 	if ( !(cpu_online_map & (1UL << cpu)) )
 		return -ENXIO;	/* No such CPU */
-	if ( !test_bit(X86_FEATURE_MSR, &c->x86_capability) )
+	if ( !cpu_has(c, X86_FEATURE_MSR) )
 		return -EIO;	/* MSR not supported */
 	return 0;
......
@@ -139,7 +139,7 @@ static void set_mtrr_prepare (struct set_mtrr_context *ctxt)
 	__cli();

 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if (test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability)) {
+	if (cpu_has_pge) {
 		ctxt->cr4val = read_cr4();
 		write_cr4(ctxt->cr4val & ~(1UL << 7));
 	}
@@ -170,7 +170,7 @@ static void set_mtrr_done (struct set_mtrr_context *ctxt)
 	write_cr0(read_cr0() & 0xbfffffff);

 	/* Restore value of CR4 */
-	if (test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability))
+	if (cpu_has_pge)
 		write_cr4 (ctxt->cr4val);

 	/* Re-enable interrupts locally (if enabled previously) */
@@ -983,7 +983,7 @@ static ssize_t mtrr_write (struct file *file, const char *buf,
 	char *ptr;
 	char line[LINE_SIZE];

-	if (!capable (CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;

 	/*  Can't seek (pwrite) on this device  */
@@ -1071,7 +1071,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
 		return -ENOIOCTLCMD;
 	case MTRRIOC_ADD_ENTRY:
-		if (!capable (CAP_SYS_ADMIN))
+		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
 			return -EFAULT;
@@ -1083,7 +1083,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
 		break;
 	case MTRRIOC_SET_ENTRY:
-		if (!capable (CAP_SYS_ADMIN))
+		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
 			return -EFAULT;
@@ -1093,7 +1093,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
 		break;
 	case MTRRIOC_DEL_ENTRY:
-		if (!capable (CAP_SYS_ADMIN))
+		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
 			return -EFAULT;
@@ -1103,7 +1103,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
 		break;
 	case MTRRIOC_KILL_ENTRY:
-		if (!capable (CAP_SYS_ADMIN))
+		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
 			return -EFAULT;
@@ -1134,7 +1134,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
 		break;
 	case MTRRIOC_ADD_PAGE_ENTRY:
-		if (!capable (CAP_SYS_ADMIN))
+		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
 			return -EFAULT;
@@ -1146,7 +1146,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
 		break;
 	case MTRRIOC_SET_PAGE_ENTRY:
-		if (!capable (CAP_SYS_ADMIN))
+		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
 			return -EFAULT;
@@ -1156,7 +1156,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
 		break;
 	case MTRRIOC_DEL_PAGE_ENTRY:
if (!capable (CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
if (copy_from_user (&sentry, (void *) arg, sizeof sentry)) if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
return -EFAULT; return -EFAULT;
...@@ -1166,7 +1166,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file, ...@@ -1166,7 +1166,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
break; break;
case MTRRIOC_KILL_PAGE_ENTRY: case MTRRIOC_KILL_PAGE_ENTRY:
if (!capable (CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
if (copy_from_user (&sentry, (void *) arg, sizeof sentry)) if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
return -EFAULT; return -EFAULT;
...@@ -1277,7 +1277,7 @@ static void __init mtrr_setup (void) ...@@ -1277,7 +1277,7 @@ static void __init mtrr_setup (void)
{ {
printk ("mtrr: v%s)\n", MTRR_VERSION); printk ("mtrr: v%s)\n", MTRR_VERSION);
if (test_bit (X86_FEATURE_MTRR, &boot_cpu_data.x86_capability)) { if (cpu_has_mtrr) {
/* Query the width (in bits) of the physical /* Query the width (in bits) of the physical
addressable memory on the Hammer family. */ addressable memory on the Hammer family. */
if ((cpuid_eax (0x80000000) >= 0x80000008)) { if ((cpuid_eax (0x80000000) >= 0x80000008)) {
......
...@@ -50,15 +50,19 @@ int __init check_nmi_watchdog (void) ...@@ -50,15 +50,19 @@ int __init check_nmi_watchdog (void)
printk(KERN_INFO "testing NMI watchdog ... "); printk(KERN_INFO "testing NMI watchdog ... ");
for (j = 0; j < NR_CPUS; ++j) for (j = 0; j < NR_CPUS; ++j) {
counts[j] = cpu_pda[cpu_logical_map(j)].__nmi_count; cpu = cpu_logical_map(j);
counts[cpu] = cpu_pda[cpu].__nmi_count;
}
sti(); sti();
mdelay((10*1000)/nmi_hz); // wait 10 ticks mdelay((10*1000)/nmi_hz); // wait 10 ticks
for (j = 0; j < smp_num_cpus; j++) { for (j = 0; j < smp_num_cpus; j++) {
cpu = cpu_logical_map(j); cpu = cpu_logical_map(j);
if (nmi_count(cpu) - counts[j] <= 5) { if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
printk("CPU#%d: NMI appears to be stuck!\n", cpu); printk("CPU#%d: NMI appears to be stuck (%d)!\n",
cpu,
cpu_pda[cpu].__nmi_count);
return -1; return -1;
} }
} }
......
...@@ -57,6 +57,8 @@ ...@@ -57,6 +57,8 @@
asmlinkage extern void ret_from_fork(void); asmlinkage extern void ret_from_fork(void);
unsigned long kernel_thread_flags = CLONE_VM;
int hlt_counter; int hlt_counter;
/* /*
...@@ -318,44 +320,11 @@ void show_regs(struct pt_regs * regs) ...@@ -318,44 +320,11 @@ void show_regs(struct pt_regs * regs)
printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
} }
/*
* No need to lock the MM as we are the last user
*/
void release_segments(struct mm_struct *mm)
{
void * ldt = mm->context.segments;
/*
* free the LDT
*/
if (ldt) {
mm->context.segments = NULL;
clear_LDT();
vfree(ldt);
}
}
void load_gs_index(unsigned gs)
{
int access;
/* should load gs in syscall exit after swapgs instead */
/* XXX need to add LDT locking for SMP to protect against parallel changes */
asm volatile("pushf\n\t"
"cli\n\t"
"swapgs\n\t"
"lar %1,%0\n\t"
"jnz 1f\n\t"
"movl %1,%%eax\n\t"
"movl %%eax,%%gs\n\t"
"jmp 2f\n\t"
"1: movl %2,%%gs\n\t"
"2: swapgs\n\t"
"popf" : "=g" (access) : "g" (gs), "r" (0) : "rax");
}
#define __STR(x) #x #define __STR(x) #x
#define __STR2(x) __STR(x) #define __STR2(x) __STR(x)
extern void load_gs_index(unsigned);
/* /*
* Free current thread data structures etc.. * Free current thread data structures etc..
*/ */
...@@ -379,43 +348,16 @@ void flush_thread(void) ...@@ -379,43 +348,16 @@ void flush_thread(void)
void release_thread(struct task_struct *dead_task) void release_thread(struct task_struct *dead_task)
{ {
if (dead_task->mm) { if (dead_task->mm) {
void * ldt = dead_task->mm->context.segments; if (dead_task->mm->context.size) {
printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
// temporary debugging check dead_task->comm,
if (ldt) { dead_task->mm->context.ldt,
printk("WARNING: dead process %8s still has LDT? <%p>\n", dead_task->mm->context.size);
dead_task->comm, ldt);
BUG(); BUG();
} }
} }
} }
/*
* we do not have to muck with descriptors here, that is
* done in switch_mm() as needed.
*/
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
struct mm_struct * old_mm;
void *old_ldt, *ldt;
ldt = NULL;
old_mm = current->mm;
if (old_mm && (old_ldt = old_mm->context.segments) != NULL) {
/*
* Completely new LDT, we initialize it from the parent:
*/
ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
if (!ldt)
printk(KERN_WARNING "ldt allocation failed\n");
else
memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
}
new_mm->context.segments = ldt;
new_mm->context.cpuvalid = 0UL;
return;
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
unsigned long unused, unsigned long unused,
struct task_struct * p, struct pt_regs * regs) struct task_struct * p, struct pt_regs * regs)
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/config.h> #include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/apm_bios.h>
#ifdef CONFIG_BLK_DEV_RAM #ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h> #include <linux/blk.h>
#endif #endif
...@@ -66,6 +65,8 @@ unsigned long mmu_cr4_features; ...@@ -66,6 +65,8 @@ unsigned long mmu_cr4_features;
/* For PCI or other memory-mapped resources */ /* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000; unsigned long pci_mem_start = 0x10000000;
unsigned long saved_video_mode;
/* /*
* Setup options * Setup options
*/ */
...@@ -546,6 +547,8 @@ static inline void parse_mem_cmdline (char ** cmdline_p) ...@@ -546,6 +547,8 @@ static inline void parse_mem_cmdline (char ** cmdline_p)
unsigned long start_pfn, end_pfn; unsigned long start_pfn, end_pfn;
extern void exception_table_check(void);
void __init setup_arch(char **cmdline_p) void __init setup_arch(char **cmdline_p)
{ {
unsigned long bootmap_size, low_mem_size; unsigned long bootmap_size, low_mem_size;
...@@ -555,6 +558,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -555,6 +558,7 @@ void __init setup_arch(char **cmdline_p)
drive_info = DRIVE_INFO; drive_info = DRIVE_INFO;
screen_info = SCREEN_INFO; screen_info = SCREEN_INFO;
aux_device_present = AUX_DEVICE_INFO; aux_device_present = AUX_DEVICE_INFO;
saved_video_mode = SAVED_VIDEO_MODE;
#ifdef CONFIG_BLK_DEV_RAM #ifdef CONFIG_BLK_DEV_RAM
rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
...@@ -783,6 +787,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -783,6 +787,8 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con; conswitchp = &dummy_con;
#endif #endif
#endif #endif
exception_table_check();
} }
#ifndef CONFIG_X86_TSC #ifndef CONFIG_X86_TSC
...@@ -1093,7 +1099,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1093,7 +1099,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
else else
seq_printf(m, "stepping\t: unknown\n"); seq_printf(m, "stepping\t: unknown\n");
if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) { if (cpu_has(c,X86_FEATURE_TSC)) {
seq_printf(m, "cpu MHz\t\t: %u.%03u\n", seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
cpu_khz / 1000, (cpu_khz % 1000)); cpu_khz / 1000, (cpu_khz % 1000));
} }
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/bootmem.h>
#include <asm/pda.h> #include <asm/pda.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -31,42 +32,64 @@ extern void ia32_cstar_target(void); ...@@ -31,42 +32,64 @@ extern void ia32_cstar_target(void);
extern struct task_struct init_task; extern struct task_struct init_task;
extern unsigned char __per_cpu_start[], __per_cpu_end[];
struct desc_ptr gdt_descr = { 0 /* filled in */, (unsigned long) gdt_table }; struct desc_ptr gdt_descr = { 0 /* filled in */, (unsigned long) gdt_table };
struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table }; struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
char boot_cpu_stack[IRQSTACKSIZE] __cacheline_aligned; char boot_cpu_stack[IRQSTACKSIZE] __cacheline_aligned;
void __init setup_per_cpu_areas(void)
{
unsigned long size, i;
unsigned char *ptr;
/* Copy section for each CPU (we discard the original) */
size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
if (!size)
return;
ptr = alloc_bootmem(size * NR_CPUS);
for (i = 0; i < NR_CPUS; i++, ptr += size) {
cpu_pda[cpu_logical_map(i)].cpudata_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, size);
}
}
void pda_init(int cpu) void pda_init(int cpu)
{ {
pml4_t *level4; pml4_t *level4;
struct x8664_pda *pda = &cpu_pda[cpu];
if (cpu == 0) { if (cpu == 0) {
/* others are initialized in smpboot.c */ /* others are initialized in smpboot.c */
cpu_pda[cpu].pcurrent = &init_task; pda->pcurrent = &init_task;
cpu_pda[cpu].irqstackptr = boot_cpu_stack; pda->irqstackptr = boot_cpu_stack;
level4 = init_level4_pgt; level4 = init_level4_pgt;
} else { } else {
cpu_pda[cpu].irqstackptr = (char *) pda->irqstackptr = (char *)
__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
if (!cpu_pda[cpu].irqstackptr) if (!pda->irqstackptr)
panic("cannot allocate irqstack for cpu %d\n", cpu); panic("cannot allocate irqstack for cpu %d\n", cpu);
level4 = (pml4_t *)__get_free_pages(GFP_ATOMIC, 0); level4 = (pml4_t *)__get_free_pages(GFP_ATOMIC, 0);
} }
if (!level4) if (!level4)
panic("Cannot allocate top level page for cpu %d", cpu); panic("Cannot allocate top level page for cpu %d", cpu);
cpu_pda[cpu].level4_pgt = (unsigned long *)level4; pda->level4_pgt = (unsigned long *)level4;
if (level4 != init_level4_pgt) if (level4 != init_level4_pgt)
memcpy(level4, &init_level4_pgt, PAGE_SIZE); memcpy(level4, &init_level4_pgt, PAGE_SIZE);
set_pml4(level4 + 510, mk_kernel_pml4(__pa_symbol(boot_vmalloc_pgt))); set_pml4(level4 + 510, mk_kernel_pml4(__pa_symbol(boot_vmalloc_pgt)));
asm volatile("movq %0,%%cr3" :: "r" (__pa(level4))); asm volatile("movq %0,%%cr3" :: "r" (__pa(level4)));
cpu_pda[cpu].irqstackptr += IRQSTACKSIZE-64; pda->irqstackptr += IRQSTACKSIZE-64;
cpu_pda[cpu].cpunumber = cpu; pda->cpunumber = cpu;
cpu_pda[cpu].irqcount = -1; pda->irqcount = -1;
cpu_pda[cpu].kernelstack = pda->kernelstack =
(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
cpu_pda[cpu].me = &cpu_pda[cpu]; pda->me = pda;
pda->cpudata_offset = 0;
asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
wrmsrl(MSR_GS_BASE, cpu_pda + cpu); wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
...@@ -75,7 +98,6 @@ void pda_init(int cpu) ...@@ -75,7 +98,6 @@ void pda_init(int cpu)
#define EXCEPTION_STK_ORDER 0 /* >= N_EXCEPTION_STACKS*EXCEPTION_STKSZ */ #define EXCEPTION_STK_ORDER 0 /* >= N_EXCEPTION_STACKS*EXCEPTION_STKSZ */
char boot_exception_stacks[N_EXCEPTION_STACKS*EXCEPTION_STKSZ]; char boot_exception_stacks[N_EXCEPTION_STACKS*EXCEPTION_STKSZ];
/* /*
* cpu_init() initializes state that is per-CPU. Some data is already * cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT * initialized (naturally) in the bootstrap process, such as the GDT
...@@ -93,6 +115,7 @@ void __init cpu_init (void) ...@@ -93,6 +115,7 @@ void __init cpu_init (void)
struct tss_struct * t = &init_tss[nr]; struct tss_struct * t = &init_tss[nr];
unsigned long v; unsigned long v;
char *estacks; char *estacks;
struct task_struct *me;
/* CPU 0 is initialised in head64.c */ /* CPU 0 is initialised in head64.c */
if (nr != 0) { if (nr != 0) {
...@@ -103,6 +126,8 @@ void __init cpu_init (void) ...@@ -103,6 +126,8 @@ void __init cpu_init (void)
} else } else
estacks = boot_exception_stacks; estacks = boot_exception_stacks;
me = current;
if (test_and_set_bit(nr, &cpu_initialized)) if (test_and_set_bit(nr, &cpu_initialized))
panic("CPU#%d already initialized!\n", nr); panic("CPU#%d already initialized!\n", nr);
...@@ -150,14 +175,14 @@ void __init cpu_init (void) ...@@ -150,14 +175,14 @@ void __init cpu_init (void)
} }
atomic_inc(&init_mm.mm_count); atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm; me->active_mm = &init_mm;
if(current->mm) if (me->mm)
BUG(); BUG();
enter_lazy_tlb(&init_mm, current, nr); enter_lazy_tlb(&init_mm, me, nr);
set_tss_desc(nr, t); set_tss_desc(nr, t);
load_TR(nr); load_TR(nr);
load_LDT(&init_mm); load_LDT(&init_mm.context);
/* /*
* Clear all 6 debug registers: * Clear all 6 debug registers:
......
...@@ -22,10 +22,8 @@ ...@@ -22,10 +22,8 @@
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/unistd.h> #include <linux/unistd.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h> #include <linux/personality.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/binfmts.h>
#include <asm/ucontext.h> #include <asm/ucontext.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/i387.h> #include <asm/i387.h>
...@@ -86,7 +84,7 @@ struct rt_sigframe ...@@ -86,7 +84,7 @@ struct rt_sigframe
char *pretcode; char *pretcode;
struct ucontext uc; struct ucontext uc;
struct siginfo info; struct siginfo info;
struct _fpstate fpstate __attribute__((aligned(8))); struct _fpstate fpstate;
}; };
static int static int
...@@ -126,6 +124,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, unsigned long *p ...@@ -126,6 +124,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, unsigned long *p
{ {
struct _fpstate * buf; struct _fpstate * buf;
err |= __get_user(buf, &sc->fpstate); err |= __get_user(buf, &sc->fpstate);
if (buf) { if (buf) {
if (verify_area(VERIFY_READ, buf, sizeof(*buf))) if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
goto badframe; goto badframe;
...@@ -147,10 +146,12 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs) ...@@ -147,10 +146,12 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs)
stack_t st; stack_t st;
long eax; long eax;
if (verify_area(VERIFY_READ, frame, sizeof(*frame))) if (verify_area(VERIFY_READ, frame, sizeof(*frame))) {
goto badframe; goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) }
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) {
goto badframe; goto badframe;
}
sigdelsetmask(&set, ~_BLOCKABLE); sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock); spin_lock_irq(&current->sigmask_lock);
...@@ -158,15 +159,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs) ...@@ -158,15 +159,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs)
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock); spin_unlock_irq(&current->sigmask_lock);
if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &eax)) if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &eax)) {
goto badframe; goto badframe;
}
#if DEBUG_SIG #if DEBUG_SIG
printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs.rip,regs.rsp,frame,eax); printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs.rip,regs.rsp,frame,eax);
#endif #endif
if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) {
goto badframe; goto badframe;
}
/* It is more difficult to avoid calling this function than to /* It is more difficult to avoid calling this function than to
call it and ignore errors. */ call it and ignore errors. */
do_sigaltstack(&st, NULL, regs.rsp); do_sigaltstack(&st, NULL, regs.rsp);
...@@ -174,10 +177,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs) ...@@ -174,10 +177,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs)
return eax; return eax;
badframe: badframe:
#if DEBUG_SIG signal_fault(&regs,frame,"sigreturn");
printk("%d bad frame %p\n",current->pid,frame);
#endif
force_sig(SIGSEGV, current);
return 0; return 0;
} }
...@@ -233,8 +233,8 @@ setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate, ...@@ -233,8 +233,8 @@ setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate,
/* /*
* Determine which stack to use.. * Determine which stack to use..
*/ */
static inline void * static inline struct rt_sigframe *
get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) get_sigframe(struct k_sigaction *ka, struct pt_regs * regs)
{ {
unsigned long rsp; unsigned long rsp;
...@@ -247,15 +247,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) ...@@ -247,15 +247,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
rsp = current->sas_ss_sp + current->sas_ss_size; rsp = current->sas_ss_sp + current->sas_ss_size;
} }
{ rsp = (rsp - sizeof(struct _fpstate)) & ~(15UL);
extern void bad_sigframe(void); rsp -= offsetof(struct rt_sigframe, fpstate);
/* beginning of sigframe is 8 bytes misaligned, but fpstate
must end up on a 16byte boundary */
if ((offsetof(struct rt_sigframe, fpstate) & 16) != 0)
bad_sigframe();
}
return (void *)((rsp - frame_size) & ~(15UL)) - 8; return (struct rt_sigframe *) rsp;
} }
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
...@@ -264,16 +259,17 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -264,16 +259,17 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe *frame; struct rt_sigframe *frame;
int err = 0; int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame)); frame = get_sigframe(ka, regs);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv; goto give_sigsegv;
if (ka->sa.sa_flags & SA_SIGINFO) { if (ka->sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, info); err |= copy_siginfo_to_user(&frame->info, info);
if (err) if (err) {
goto give_sigsegv; goto give_sigsegv;
} }
}
/* Create the ucontext. */ /* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_flags);
...@@ -285,9 +281,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -285,9 +281,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
regs, set->sig[0]); regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
if (err) {
goto give_sigsegv;
}
/* Set up to return from userspace. If provided, use a stub /* Set up to return from userspace. If provided, use a stub
already in userspace. */ already in userspace. */
...@@ -299,8 +296,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -299,8 +296,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
goto give_sigsegv; goto give_sigsegv;
} }
if (err) if (err) {
printk("fault 3\n");
goto give_sigsegv; goto give_sigsegv;
}
#if DEBUG_SIG #if DEBUG_SIG
printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax); printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
...@@ -337,7 +336,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -337,7 +336,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
give_sigsegv: give_sigsegv:
if (sig == SIGSEGV) if (sig == SIGSEGV)
ka->sa.sa_handler = SIG_DFL; ka->sa.sa_handler = SIG_DFL;
force_sig(SIGSEGV, current); signal_fault(regs,frame,"signal setup");
} }
/* /*
...@@ -459,3 +458,15 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_ ...@@ -459,3 +458,15 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_
if (thread_info_flags & _TIF_SIGPENDING) if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs,oldset); do_signal(regs,oldset);
} }
extern int exception_trace;
void signal_fault(struct pt_regs *regs, void *frame, char *where)
{
struct task_struct *me = current;
if (exception_trace)
printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax);
force_sig(SIGSEGV, me);
}
...@@ -150,6 +150,7 @@ static void inline leave_mm (unsigned long cpu) ...@@ -150,6 +150,7 @@ static void inline leave_mm (unsigned long cpu)
if (cpu_tlbstate[cpu].state == TLBSTATE_OK) if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
BUG(); BUG();
clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask); clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
__flush_tlb();
} }
/* /*
...@@ -200,10 +201,12 @@ static void inline leave_mm (unsigned long cpu) ...@@ -200,10 +201,12 @@ static void inline leave_mm (unsigned long cpu)
asmlinkage void smp_invalidate_interrupt (void) asmlinkage void smp_invalidate_interrupt (void)
{ {
unsigned long cpu = smp_processor_id(); unsigned long cpu;
cpu = get_cpu();
if (!test_bit(cpu, &flush_cpumask)) if (!test_bit(cpu, &flush_cpumask))
return; goto out;
/* /*
* This was a BUG() but until someone can quote me the * This was a BUG() but until someone can quote me the
* line from the intel manual that guarantees an IPI to * line from the intel manual that guarantees an IPI to
...@@ -224,6 +227,9 @@ asmlinkage void smp_invalidate_interrupt (void) ...@@ -224,6 +227,9 @@ asmlinkage void smp_invalidate_interrupt (void)
} }
ack_APIC_irq(); ack_APIC_irq();
clear_bit(cpu, &flush_cpumask); clear_bit(cpu, &flush_cpumask);
out:
put_cpu();
} }
static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
...@@ -273,16 +279,23 @@ static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, ...@@ -273,16 +279,23 @@ static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
void flush_tlb_current_task(void) void flush_tlb_current_task(void)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id()); unsigned long cpu_mask;
preempt_disable();
cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
local_flush_tlb(); local_flush_tlb();
if (cpu_mask) if (cpu_mask)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL); flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
} }
void flush_tlb_mm (struct mm_struct * mm) void flush_tlb_mm (struct mm_struct * mm)
{ {
unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id()); unsigned long cpu_mask;
preempt_disable();
cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
if (current->active_mm == mm) { if (current->active_mm == mm) {
if (current->mm) if (current->mm)
...@@ -292,12 +305,17 @@ void flush_tlb_mm (struct mm_struct * mm) ...@@ -292,12 +305,17 @@ void flush_tlb_mm (struct mm_struct * mm)
} }
if (cpu_mask) if (cpu_mask)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL); flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
} }
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id()); unsigned long cpu_mask;
preempt_disable();
cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
if (current->active_mm == mm) { if (current->active_mm == mm) {
if(current->mm) if(current->mm)
...@@ -308,6 +326,8 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) ...@@ -308,6 +326,8 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
if (cpu_mask) if (cpu_mask)
flush_tlb_others(cpu_mask, mm, va); flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
} }
static inline void do_flush_tlb_all_local(void) static inline void do_flush_tlb_all_local(void)
......
...@@ -50,9 +50,6 @@ ...@@ -50,9 +50,6 @@
#include <asm/kdebug.h> #include <asm/kdebug.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
/* Set if we find a B stepping CPU */
static int smp_b_stepping;
/* Setup configured maximum number of CPUs to activate */ /* Setup configured maximum number of CPUs to activate */
static int max_cpus = -1; static int max_cpus = -1;
...@@ -151,17 +148,6 @@ void __init smp_store_cpu_info(int id) ...@@ -151,17 +148,6 @@ void __init smp_store_cpu_info(int id)
*c = boot_cpu_data; *c = boot_cpu_data;
identify_cpu(c); identify_cpu(c);
/*
* Mask B, Pentium, but not Pentium MMX
*/
if (c->x86_vendor == X86_VENDOR_INTEL &&
c->x86 == 5 &&
c->x86_mask >= 1 && c->x86_mask <= 4 &&
c->x86_model <= 3)
/*
* Remember we have B step Pentia with bugs
*/
smp_b_stepping = 1;
} }
/* /*
...@@ -772,7 +758,7 @@ unsigned long cache_decay_ticks; ...@@ -772,7 +758,7 @@ unsigned long cache_decay_ticks;
static void smp_tune_scheduling (void) static void smp_tune_scheduling (void)
{ {
unsigned long cachesize; /* kB */ unsigned long cachesize; /* kB */
unsigned long bandwidth = 350; /* MB/s */ unsigned long bandwidth = 1000; /* MB/s */
/* /*
* Rough estimation for SMP scheduling, this is the number of * Rough estimation for SMP scheduling, this is the number of
* cycles it takes for a fully memory-limited process to flush * cycles it takes for a fully memory-limited process to flush
...@@ -883,8 +869,7 @@ void __init smp_boot_cpus(void) ...@@ -883,8 +869,7 @@ void __init smp_boot_cpus(void)
/* /*
* If we couldn't find a local APIC, then get out of here now! * If we couldn't find a local APIC, then get out of here now!
*/ */
if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
!test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_id); boot_cpu_id);
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
...@@ -965,7 +950,7 @@ void __init smp_boot_cpus(void) ...@@ -965,7 +950,7 @@ void __init smp_boot_cpus(void)
Dprintk("Before bogomips.\n"); Dprintk("Before bogomips.\n");
if (!cpucount) { if (!cpucount) {
printk(KERN_ERR "Error: only one processor found.\n"); printk(KERN_INFO "Only one processor found.\n");
} else { } else {
unsigned long bogosum = 0; unsigned long bogosum = 0;
for (cpu = 0; cpu < NR_CPUS; cpu++) for (cpu = 0; cpu < NR_CPUS; cpu++)
...@@ -979,8 +964,6 @@ void __init smp_boot_cpus(void) ...@@ -979,8 +964,6 @@ void __init smp_boot_cpus(void)
} }
smp_num_cpus = cpucount + 1; smp_num_cpus = cpucount + 1;
if (smp_b_stepping)
printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
Dprintk("Boot done.\n"); Dprintk("Boot done.\n");
/* /*
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/mman.h> #include <linux/mman.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/personality.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/ipc.h> #include <asm/ipc.h>
...@@ -22,7 +23,7 @@ ...@@ -22,7 +23,7 @@
* sys_pipe() is the normal C calling standard for creating * sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though. * a pipe. It's not the way Unix traditionally does this, though.
*/ */
asmlinkage long sys_pipe(unsigned long * fildes) asmlinkage long sys_pipe(int *fildes)
{ {
int fd[2]; int fd[2];
int error; int error;
...@@ -93,18 +94,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi ...@@ -93,18 +94,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
} }
} }
/* asmlinkage long sys_uname(struct new_utsname * name)
* Old cruft
*/
asmlinkage long sys_uname(struct old_utsname * name)
{ {
int err; int err;
if (!name)
return -EFAULT;
down_read(&uts_sem); down_read(&uts_sem);
err=copy_to_user(name, &system_utsname, sizeof (*name)); err = copy_to_user(name, &system_utsname, sizeof (*name));
up_read(&uts_sem); up_read(&uts_sem);
return err?-EFAULT:0; if (current->personality == PER_LINUX32)
err |= copy_to_user(&name->machine, "i386", 5);
return err ? -EFAULT : 0;
} }
asmlinkage long sys_pause(void) asmlinkage long sys_pause(void)
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/device.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/smp.h> #include <asm/smp.h>
...@@ -57,12 +58,8 @@ ...@@ -57,12 +58,8 @@
#include <asm/fixmap.h> #include <asm/fixmap.h>
/*
* for x86_do_profile()
*/
#include <linux/irq.h> #include <linux/irq.h>
unsigned int cpu_khz; /* Detected as we calibrate the TSC */ unsigned int cpu_khz; /* Detected as we calibrate the TSC */
/* Number of usecs that the last interrupt was delayed */ /* Number of usecs that the last interrupt was delayed */
...@@ -79,9 +76,9 @@ unsigned int __fast_gettimeoffset_quotient __section_fast_gettimeoffset_quotient ...@@ -79,9 +76,9 @@ unsigned int __fast_gettimeoffset_quotient __section_fast_gettimeoffset_quotient
extern rwlock_t xtime_lock; extern rwlock_t xtime_lock;
struct timeval __xtime __section_xtime; struct timeval __xtime __section_xtime;
volatile unsigned long __jiffies __section_jiffies;
unsigned long __wall_jiffies __section_wall_jiffies; unsigned long __wall_jiffies __section_wall_jiffies;
struct timezone __sys_tz __section_sys_tz; struct timezone __sys_tz __section_sys_tz;
volatile unsigned long __jiffies __section_jiffies;
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
...@@ -527,3 +524,15 @@ void __init time_init(void) ...@@ -527,3 +524,15 @@ void __init time_init(void)
setup_irq(0, &irq0); setup_irq(0, &irq0);
} }
static struct device device_i8253 = {
name: "i8253",
bus_id: "0040",
};
static int time_init_driverfs(void)
{
return register_sys_device(&device_i8253);
}
__initcall(time_init_driverfs);
...@@ -77,7 +77,7 @@ asmlinkage void machine_check(void); ...@@ -77,7 +77,7 @@ asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void); asmlinkage void spurious_interrupt_bug(void);
asmlinkage void call_debug(void); asmlinkage void call_debug(void);
extern char iret_address[]; extern int exception_trace;
struct notifier_block *die_chain; struct notifier_block *die_chain;
...@@ -172,6 +172,8 @@ void show_trace(unsigned long *stack) ...@@ -172,6 +172,8 @@ void show_trace(unsigned long *stack)
i = 1; i = 1;
if (stack >= irqstack && stack < irqstack_end) { if (stack >= irqstack && stack < irqstack_end) {
unsigned long *tstack; unsigned long *tstack;
printk("<IRQ> ");
while (stack < irqstack_end) { while (stack < irqstack_end) {
addr = *stack++; addr = *stack++;
/* /*
...@@ -197,13 +199,11 @@ void show_trace(unsigned long *stack) ...@@ -197,13 +199,11 @@ void show_trace(unsigned long *stack)
tstack = (unsigned long *)(current_thread_info()+1); tstack = (unsigned long *)(current_thread_info()+1);
if (stack < tstack || (char*)stack > (char*)tstack+THREAD_SIZE) if (stack < tstack || (char*)stack > (char*)tstack+THREAD_SIZE)
printk("\n" KERN_DEBUG printk("\n" KERN_DEBUG
"no stack at the end of irqstack; stack:%lx, curstack %lx\n", "no stack at the end of irqstack; stack:%p, curstack %p\n",
stack, tstack); stack, tstack);
#endif #endif
} }
while (((long) stack & (THREAD_SIZE-1)) != 0) { while (((long) stack & (THREAD_SIZE-1)) != 0) {
addr = *stack++; addr = *stack++;
if (kernel_text_address(addr)) { if (kernel_text_address(addr)) {
...@@ -263,7 +263,7 @@ void show_stack(unsigned long * rsp) ...@@ -263,7 +263,7 @@ void show_stack(unsigned long * rsp)
void show_registers(struct pt_regs *regs) void show_registers(struct pt_regs *regs)
{ {
int i; int i;
int in_kernel = 1; int in_kernel = (regs->cs & 3) == 0;
unsigned long rsp; unsigned long rsp;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* For SMP should get the APIC id here, just to protect against corrupted GS */ /* For SMP should get the APIC id here, just to protect against corrupted GS */
...@@ -273,11 +273,8 @@ void show_registers(struct pt_regs *regs) ...@@ -273,11 +273,8 @@ void show_registers(struct pt_regs *regs)
#endif #endif
struct task_struct *cur = cpu_pda[cpu].pcurrent; struct task_struct *cur = cpu_pda[cpu].pcurrent;
rsp = (unsigned long) (&regs->rsp);
if (regs->rsp < TASK_SIZE) {
in_kernel = 0;
rsp = regs->rsp; rsp = regs->rsp;
}
printk("CPU %d ", cpu); printk("CPU %d ", cpu);
show_regs(regs); show_regs(regs);
printk("Process %s (pid: %d, stackpage=%08lx)\n", printk("Process %s (pid: %d, stackpage=%08lx)\n",
...@@ -383,7 +380,7 @@ static void do_trap(int trapnr, int signr, char *str, ...@@ -383,7 +380,7 @@ static void do_trap(int trapnr, int signr, char *str,
if ((regs->cs & 3) != 0) { if ((regs->cs & 3) != 0) {
struct task_struct *tsk = current; struct task_struct *tsk = current;
if (trapnr != 3) if (exception_trace && trapnr != 3)
printk("%s[%d] trap %s at rip:%lx rsp:%lx err:%lx\n", printk("%s[%d] trap %s at rip:%lx rsp:%lx err:%lx\n",
tsk->comm, tsk->pid, str, regs->rip, regs->rsp, error_code); tsk->comm, tsk->pid, str, regs->rip, regs->rsp, error_code);
...@@ -456,9 +453,14 @@ extern void dump_pagetable(unsigned long); ...@@ -456,9 +453,14 @@ extern void dump_pagetable(unsigned long);
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code) asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{ {
if ((regs->cs & 3)!=0) { if ((regs->cs & 3)!=0) {
current->thread.error_code = error_code; struct task_struct *tsk = current;
current->thread.trap_no = 13; if (exception_trace)
force_sig(SIGSEGV, current); printk("%s[%d] #gp at rip:%lx rsp:%lx err:%lx\n",
tsk->comm, tsk->pid, regs->rip, regs->rsp, error_code);
tsk->thread.error_code = error_code;
tsk->thread.trap_no = 13;
force_sig(SIGSEGV, tsk);
return; return;
} }
...@@ -509,8 +511,7 @@ asmlinkage void do_nmi(struct pt_regs * regs) ...@@ -509,8 +511,7 @@ asmlinkage void do_nmi(struct pt_regs * regs)
{ {
unsigned char reason = inb(0x61); unsigned char reason = inb(0x61);
add_pda(__nmi_count,1);
++nmi_count(smp_processor_id());
if (!(reason & 0xc0)) { if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC #if CONFIG_X86_LOCAL_APIC
/* /*
......
...@@ -47,14 +47,14 @@ ...@@ -47,14 +47,14 @@
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
//#define NO_VSYSCALL 1 #define NO_VSYSCALL 1
#ifdef NO_VSYSCALL #ifdef NO_VSYSCALL
#include <asm/unistd.h> #include <asm/unistd.h>
static int errno __section_vxtime_sequence; static int errno __section_vxtime_sequence;
__syscall2(static inline int,int,gettimeofday,struct timeval *,tv,struct timezone *,tz) static inline _syscall2(int,gettimeofday,struct timeval *,tv,struct timezone *,tz)
#else #else
static inline void timeval_normalize(struct timeval * tv) static inline void timeval_normalize(struct timeval * tv)
...@@ -148,28 +148,11 @@ static int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz ...@@ -148,28 +148,11 @@ static int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz
static time_t __vsyscall(1) vtime(time_t * t) static time_t __vsyscall(1) vtime(time_t * t)
{ {
#ifdef NO_VSYSCALL
struct timeval tv; struct timeval tv;
gettimeofday(&tv,NULL); vgettimeofday(&tv,NULL);
if (t) *t = tv.tv_sec;
return tv.tv_sec;
#else
long sequence;
time_t __time;
do {
sequence = __vxtime_sequence[1];
rmb();
__time = __xtime.tv_sec;
rmb();
} while (sequence != __vxtime_sequence[0]);
if (t) if (t)
*t = __time; *t = tv.tv_sec;
return __time; return tv.tv_sec;
#endif
} }
static long __vsyscall(2) venosys_0(void) static long __vsyscall(2) venosys_0(void)
......
...@@ -88,8 +88,9 @@ EXPORT_SYMBOL(strncpy_from_user); ...@@ -88,8 +88,9 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__generic_copy_from_user); EXPORT_SYMBOL(copy_user_generic);
EXPORT_SYMBOL(__generic_copy_to_user); EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(strnlen_user); EXPORT_SYMBOL(strnlen_user);
EXPORT_SYMBOL(pci_alloc_consistent); EXPORT_SYMBOL(pci_alloc_consistent);
......
# #
# Makefile for x86_64-specific library files.. # Makefile for x86_64-specific library files.
# #
USE_STANDARD_AS_RULE := true
EXTRA_CFLAGS_csum-partial.o := -funroll-loops
L_TARGET = lib.a L_TARGET = lib.a
obj-y = generic-checksum.o old-checksum.o delay.o \ obj-y = csum-partial.o csum-copy.o csum-wrappers.o delay.o \
usercopy.o getuser.o putuser.o \ usercopy.o getuser.o putuser.o \
checksum_copy.o thunk.o mmx.o thunk.o io.o clear_page.o copy_page.o
obj-y += memcpy.o
obj-y += memmove.o
#obj-y += memset.o
obj-y += copy_user.o
export-objs := io.o csum-wrappers.o csum-partial.o
obj-$(CONFIG_IO_DEBUG) += iodebug.o obj-$(CONFIG_IO_DEBUG) += iodebug.o
obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
......
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Pentium Pro/II routines:
* Alexander Kjeldaas <astor@guardian.no>
* Finn Arne Gangstad <finnag@guardian.no>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
* handling.
* Andi Kleen, add zeroing on error
* converted to pure assembler
* Andi Kleen initial raw port to x86-64
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <asm/errno.h>
/* Version for PentiumII/PPro ported to x86-64. Still very raw and
does not exploit 64bit. */
#define SRC(y...) \
9999: y; \
.section __ex_table, "a"; \
.quad 9999b, 6001f ; \
.previous
#define DST(y...) \
9999: y; \
.section __ex_table, "a"; \
.quad 9999b, 6002f ; \
.previous
#define ROUND1(x) \
SRC(movl x(%rsi), %ebx ) ; \
addl %ebx, %eax ; \
DST(movl %ebx, x(%rdi) ) ;
#define ROUND(x) \
SRC(movl x(%rsi), %ebx ) ; \
adcl %ebx, %eax ; \
DST(movl %ebx, x(%rdi) ) ;
#define ARGBASE 0
/*
asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
int *src_err_ptr, int *dst_err_ptr);
rdi .. src
rsi .. dst (copy in r12)
rdx .. len (copy in r10)
rcx .. sum
r8 .. src_err_ptr
r9 .. dst_err_ptr
OPTIMIZEME: this routine should take advantage of checksumming 64bits at a time
*/
.globl csum_partial_copy_generic
csum_partial_copy_generic:
pushq %r10
pushq %r12
pushq %rbx
pushq %rbp
xchgq %rsi, %rdi
movq %rdx, %r10
movq %rsi, %r12
movq %rcx, %rax
movq %rdx, %rcx # And now it looks like PII case
movl %ecx, %ebx
movl %esi, %edx
shrl $6, %ecx
andl $0x3c, %ebx
negq %rbx
subq %rbx, %rsi
subq %rbx, %rdi
lea 3f(%rbx,%rbx), %rbx
testq %rsi, %rsi
jmp *%rbx
1: addq $64,%rsi
addq $64,%rdi
ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4)
3: adcl $0,%eax
addl $64,%edx
dec %ecx
jge 1b
4: movq %r10,%rdx
andl $3, %edx
jz 7f
cmpl $2, %edx
jb 5f
SRC( movw (%rsi), %dx )
leaq 2(%rsi), %rsi
DST( movw %dx, (%rdi) )
leaq 2(%rdi), %rdi
je 6f
shll $16,%edx
5:
SRC( movb (%rsi), %dl )
DST( movb %dl, (%rdi) )
6: addl %edx, %eax
adcl $0, %eax
7:
.section .fixup, "ax"
6001:
movl $-EFAULT, (%r8)
# zero the complete destination (computing the rest is too much work)
movq %r12,%rdi # dst
movq %r10,%rcx # len
xorl %eax,%eax
rep; stosb
jmp 7b
6002: movl $-EFAULT,(%r9)
jmp 7b
.previous
popq %rbp
popq %rbx
popq %r12
popq %r10
ret
#undef ROUND
#undef ROUND1
/*
* Copyright 2002 Andi Kleen, SuSE Labs.
*/
#include <linux/linkage.h>
/*
* Zero a page.
* rdi page
*/
ENTRY(clear_page)
xorl %eax,%eax
movl $4096/128,%ecx
movl $128,%edx
loop:
#define PUT(x) movnti %rax,x*8(%rdi)
PUT(0)
PUT(1)
PUT(2)
PUT(3)
PUT(4)
PUT(5)
PUT(6)
PUT(7)
PUT(8)
PUT(9)
PUT(10)
PUT(11)
PUT(12)
PUT(13)
PUT(14)
PUT(15)
addq %rdx,%rdi
decl %ecx
jnz loop
sfence
ret
/*
* Copyright 2002 Andi Kleen, SuSE Labs.
*/
#include <linux/linkage.h>
#include <linux/config.h>
#ifdef CONFIG_PREEMPT
#warning "check your fpu context saving!"
#endif
/*
* Copy a page.
*
* rdi destination page
* rsi source page
*
* src/dst must be aligned to 16 bytes.
*
* Warning: in case of super lazy FP save this needs to be preempt_stop
*/
ENTRY(copy_page)
prefetchnta (%rsi)
prefetchnta 64(%rsi)
movq %rsp,%rax
subq $16*4,%rsp
andq $~15,%rsp
movdqa %xmm0,(%rsp)
movdqa %xmm1,16(%rsp)
movdqa %xmm2,32(%rsp)
movdqa %xmm3,48(%rsp)
movl $(4096/128)-2,%ecx
movl $128,%edx
loop:
prefetchnta (%rsi)
prefetchnta 64(%rsi)
loop_no_prefetch:
movdqa (%rsi),%xmm0
movdqa 1*16(%rsi),%xmm1
movdqa 2*16(%rsi),%xmm2
movdqa 3*16(%rsi),%xmm3
movntdq %xmm0,(%rdi)
movntdq %xmm1,16(%rdi)
movntdq %xmm2,2*16(%rdi)
movntdq %xmm3,3*16(%rdi)
movdqa 4*16(%rsi),%xmm0
movdqa 5*16(%rsi),%xmm1
movdqa 6*16(%rsi),%xmm2
movdqa 7*16(%rsi),%xmm3
movntdq %xmm0,4*16(%rdi)
movntdq %xmm1,5*16(%rdi)
movntdq %xmm2,6*16(%rdi)
movntdq %xmm3,7*16(%rdi)
addq %rdx,%rdi
addq %rdx,%rsi
decl %ecx
jns loop
cmpl $-1,%ecx
je loop_no_prefetch
sfence
movdqa (%rsp),%xmm0
movdqa 16(%rsp),%xmm1
movdqa 32(%rsp),%xmm2
movdqa 48(%rsp),%xmm3
movq %rax,%rsp
ret
/* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU Public License v2.
*
* Functions to copy from and to user space.
*/
#define FIX_ALIGNMENT 1
#include <asm/thread_info.h>
#include <asm/offset.h>
/* Standard copy_to_user with segment limit checking */
.globl copy_to_user
.p2align
copy_to_user:
GET_THREAD_INFO(%rax)
movq %rdi,%rcx
addq %rdx,%rcx
jc bad_to_user
cmpq threadinfo_addr_limit(%rax),%rcx
jae bad_to_user
jmp copy_user_generic
/* Standard copy_from_user with segment limit checking */
.globl copy_from_user
.p2align
copy_from_user:
GET_THREAD_INFO(%rax)
movq %rsi,%rcx
addq %rdx,%rcx
jc bad_from_user
cmpq threadinfo_addr_limit(%rax),%rcx
jae bad_from_user
/* FALL THROUGH to copy_user_generic */
.section .fixup,"ax"
/* must zero dest */
bad_from_user:
movl %edx,%ecx
xorl %eax,%eax
rep
stosb
bad_to_user:
movl %edx,%eax
ret
.previous
/*
* copy_user_generic - memory copy with exception handling.
*
* Input:
* rdi destination
* rsi source
* rdx count
*
* Output:
* eax uncopied bytes or 0 if successfull.
*/
.globl copy_user_generic
copy_user_generic:
/* Put the first cacheline into cache. This should handle
the small movements in ioctls etc., but not penalize the bigger
filesystem data copies too much. */
pushq %rbx
prefetcht0 (%rsi)
xorl %eax,%eax /*zero for the exception handler */
#ifdef FIX_ALIGNMENT
/* check for bad alignment of destination */
movl %edi,%ecx
andl $7,%ecx
jnz bad_alignment
after_bad_alignment:
#endif
movq %rdx,%rcx
movl $64,%ebx
shrq $6,%rdx
decq %rdx
js handle_tail
jz loop_no_prefetch
loop:
prefetchnta 64(%rsi)
loop_no_prefetch:
s1: movq (%rsi),%r11
s2: movq 1*8(%rsi),%r8
s3: movq 2*8(%rsi),%r9
s4: movq 3*8(%rsi),%r10
d1: movnti %r11,(%rdi)
d2: movnti %r8,1*8(%rdi)
d3: movnti %r9,2*8(%rdi)
d4: movnti %r10,3*8(%rdi)
s5: movq 4*8(%rsi),%r11
s6: movq 5*8(%rsi),%r8
s7: movq 6*8(%rsi),%r9
s8: movq 7*8(%rsi),%r10
d5: movnti %r11,4*8(%rdi)
d6: movnti %r8,5*8(%rdi)
d7: movnti %r9,6*8(%rdi)
d8: movnti %r10,7*8(%rdi)
addq %rbx,%rsi
addq %rbx,%rdi
decq %rdx
jz loop_no_prefetch
jns loop
handle_tail:
movl %ecx,%edx
andl $63,%ecx
shrl $3,%ecx
jz handle_7
movl $8,%ebx
loop_8:
s9: movq (%rsi),%r8
d9: movnti %r8,(%rdi)
addq %rbx,%rdi
addq %rbx,%rsi
loop loop_8
handle_7:
movl %edx,%ecx
andl $7,%ecx
jz ende
loop_1:
s10: movb (%rsi),%bl
d10: movb %bl,(%rdi)
incq %rdi
incq %rsi
loop loop_1
ende:
sfence
popq %rbx
ret
#ifdef FIX_ALIGNMENT
/* align destination */
bad_alignment:
movl $8,%r9d
subl %ecx,%r9d
movl %r9d,%ecx
subq %r9,%rdx
jz small_align
js small_align
align_1:
s11: movb (%rsi),%bl
d11: movb %bl,(%rdi)
incq %rsi
incq %rdi
loop align_1
jmp after_bad_alignment
small_align:
addq %r9,%rdx
jmp handle_7
#endif
/* table sorted by exception address */
.section __ex_table,"a"
.align 8
.quad s1,s1e
.quad s2,s2e
.quad s3,s3e
.quad s4,s4e
.quad d1,s1e
.quad d2,s2e
.quad d3,s3e
.quad d4,s4e
.quad s5,s5e
.quad s6,s6e
.quad s7,s7e
.quad s8,s8e
.quad d5,s5e
.quad d6,s6e
.quad d7,s7e
.quad d8,s8e
.quad s9,e_quad
.quad d9,e_quad
.quad s10,e_byte
.quad d10,e_byte
#ifdef FIX_ALIGNMENT
.quad s11,e_byte
.quad d11,e_byte
#endif
.quad e5,e_zero
.previous
/* compute 64-offset for main loop. 8 bytes accuracy with error on the
pessimistic side. this is gross. it would be better to fix the
interface. */
/* eax: zero, ebx: 64 */
s1e: addl $8,%eax
s2e: addl $8,%eax
s3e: addl $8,%eax
s4e: addl $8,%eax
s5e: addl $8,%eax
s6e: addl $8,%eax
s7e: addl $8,%eax
s8e: addl $8,%eax
addq %rbx,%rdi /* +64 */
subq %rax,%rdi /* correct destination with computed offset */
shlq $6,%rdx /* loop counter * 64 (stride length) */
addq %rax,%rdx /* add offset to loopcnt */
andl $63,%ecx /* remaining bytes */
addq %rcx,%rdx /* add them */
jmp zero_rest
/* exception on quad word loop in tail handling */
/* ecx: loopcnt/8, %edx: length, rdi: correct */
e_quad:
shll $3,%ecx
andl $7,%edx
addl %ecx,%edx
/* edx: bytes to zero, rdi: dest, eax:zero */
zero_rest:
movq %rdx,%rcx
e_byte:
xorl %eax,%eax
e5: rep
stosb
/* when there is another exception while zeroing the rest just return */
e_zero:
movq %rdx,%rax
jmp ende
/*
* Copyright 2002 Andi Kleen
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details. No warranty for anything given at all.
*/
#include <linux/linkage.h>
#include <asm/errno.h>
// #define FIX_ALIGNMENT 1
/*
* Checksum copy with exception handling.
* On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the
* destination is zeroed.
*
* Input
* rdi source
* rsi destination
* edx len (32bit)
* ecx sum (32bit)
* r8 src_err_ptr (int)
* r9 dst_err_ptr (int)
*
* Output
* eax 64bit sum. undefined in case of exception.
*
* Wrappers need to take care of valid exception sum and zeroing.
*/
.macro source
10:
.section __ex_table,"a"
.align 8
.quad 10b,bad_source
.previous
.endm
.macro dest
20:
.section __ex_table,"a"
.align 8
.quad 20b,bad_dest
.previous
.endm
.globl csum_partial_copy_generic
.p2align
csum_partial_copy_generic:
prefetchnta (%rdi)
pushq %rbx
pushq %r12
pushq %r14
pushq %r15
movq %r8,%r14
movq %r9,%r15
movl %ecx,%eax
movl %edx,%ecx
#ifdef FIX_ALIGNMENT
/* align source to 8 bytes */
movl %edi,%r8d
andl $7,%r8d
jnz bad_alignment
after_bad_alignment:
#endif
movl $64,%r10d
xorl %r9d,%r9d
movq %rcx,%r12
shrq $6,%r12
/* loopcounter is maintained as one less to test efficiently for the
previous to last iteration. This is needed to stop the prefetching. */
decq %r12
js handle_tail /* < 64 */
jz loop_no_prefetch /* = 64 + X */
/* main loop. clear in 64 byte blocks */
/* tries hard not to prefetch over the boundary */
/* r10: 64, r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
/* r11: temp3, rdx: temp4, r12 loopcnt */
.p2align
loop:
/* Could prefetch more than one loop, but then it would be even
trickier to avoid prefetching over the boundary. The hardware prefetch
should take care of this anyways. The reason for this prefetch is
just the non temporal hint to avoid cache pollution. Hopefully this
will be handled properly by the hardware. */
prefetchnta 64(%rdi)
loop_no_prefetch:
source
movq (%rdi),%rbx
source
movq 8(%rdi),%r8
source
movq 16(%rdi),%r11
source
movq 24(%rdi),%rdx
dest
movnti %rbx,(%rsi)
dest
movnti %r8,8(%rsi)
dest
movnti %r11,16(%rsi)
dest
movnti %rdx,24(%rsi)
addq %rbx,%rax
adcq %r8,%rax
adcq %r11,%rax
adcq %rdx,%rax
source
movq 32(%rdi),%rbx
source
movq 40(%rdi),%r8
source
movq 48(%rdi),%r11
source
movq 56(%rdi),%rdx
dest
movnti %rbx,32(%rsi)
dest
movnti %r8,40(%rsi)
dest
movnti %r11,48(%rsi)
dest
movnti %rdx,56(%rsi)
adcq %rbx,%rax
adcq %r8,%rax
adcq %r11,%rax
adcq %rdx,%rax
adcq %r9,%rax /* add in carry */
addq %r10,%rdi
addq %r10,%rsi
decq %r12
jz loop_no_prefetch /* previous to last iteration? */
jns loop
/* do last upto 56 bytes */
handle_tail:
/* ecx: count */
movl %ecx,%r10d
andl $63,%ecx
shrl $3,%ecx
jz fold
clc
movl $8,%edx
loop_8:
source
movq (%rdi),%rbx
adcq %rbx,%rax
dest
movnti %rbx,(%rsi)
leaq (%rsi,%rdx),%rsi /* preserve carry */
leaq (%rdi,%rdx),%rdi
loop loop_8
adcq %r9,%rax /* add in carry */
fold:
movl %eax,%ebx
shrq $32,%rax
addq %rbx,%rax
/* do last upto 6 bytes */
handle_7:
movl %r10d,%ecx
andl $7,%ecx
shrl $1,%ecx
jz handle_1
movl $2,%edx
xorl %ebx,%ebx
clc
loop_1:
source
movw (%rdi),%bx
adcq %rbx,%rax
dest
movw %bx,(%rsi)
addq %rdx,%rdi
addq %rdx,%rsi
loop loop_1
adcw %r9w,%ax /* add in carry */
/* handle last odd byte */
handle_1:
testl $1,%r10d
jz ende
xorl %ebx,%ebx
source
movb (%rdi),%bl
dest
movb %bl,(%rsi)
addw %bx,%ax
adcw %r9w,%ax /* carry */
ende:
sfence
popq %r15
popq %r14
popq %r12
popq %rbx
ret
#ifdef FIX_ALIGNMENT
/* align source to 8 bytes. */
/* r8d: unalignedness, ecx len */
bad_alignment:
testl $1,%edi
jnz odd_source
/* compute distance to next aligned position */
movl $8,%r8d
xchgl %r8d,%ecx
subl %r8d,%ecx
/* handle unaligned part */
shrl $1,%ecx
xorl %ebx,%ebx
movl $2,%r10d
align_loop:
source
movw (%rdi),%bx
addq %rbx,%rax /* carry cannot happen */
dest
movw %bx,(%rsi)
addq %r10,%rdi
addq %r10,%rsi
loop align_loop
jmp after_bad_alignment
/* weird case. need to swap the sum at the end because the spec requires
16 bit words of the sum to be always paired.
handle it recursively because it should be rather rare. */
odd_source:
/* copy odd byte */
xorl %ebx,%ebx
source
movb (%rdi),%bl
addl %ebx,%eax /* add to old checksum */
adcl $0,%ecx
dest
movb %al,(%rsi)
/* fix arguments */
movl %eax,%ecx
incq %rsi
incq %rdi
decq %rdx
call csum_partial_copy_generic
bswap %eax /* this should work, but check */
jmp ende
#endif
/* Exception handlers. Very simple, zeroing is done in the wrappers */
bad_source:
movl $-EFAULT,(%r14)
jmp ende
bad_dest:
movl $-EFAULT,(%r15)
jmp ende
/* /*
* arch/x86_64/lib/checksum.c * arch/x86_64/lib/csum-partial.c
* *
* This file contains network checksum routines that are better done * This file contains network checksum routines that are better done
* in an architecture-specific manner due to speed.. * in an architecture-specific manner due to speed.
*/ */
#include <linux/string.h> #include <linux/compiler.h>
#include <asm/byteorder.h> #include <linux/module.h>
/* Better way for this sought */
static inline unsigned short from64to16(unsigned long x) static inline unsigned short from64to16(unsigned long x)
{ {
/* add up 32-bit words for 33 bits */ /* add up 32-bit words for 33 bits */
...@@ -22,13 +23,13 @@ static inline unsigned short from64to16(unsigned long x) ...@@ -22,13 +23,13 @@ static inline unsigned short from64to16(unsigned long x)
} }
/* /*
* Do a 64-bit checksum on an arbitrary memory area.. * Do a 64-bit checksum on an arbitrary memory area.
* Returns a 32bit checksum.
* *
* This isn't a great routine, but it's not _horrible_ either. The * This isn't a great routine, but it's not _horrible_ either.
* inner loop could be unrolled a bit further, and there are better * We rely on the compiler to unroll.
* ways to do the carry, but this is reasonable.
*/ */
static inline unsigned long do_csum(const unsigned char * buff, int len) static inline unsigned do_csum(const unsigned char * buff, int len)
{ {
int odd, count; int odd, count;
unsigned long result = 0; unsigned long result = 0;
...@@ -36,7 +37,7 @@ static inline unsigned long do_csum(const unsigned char * buff, int len) ...@@ -36,7 +37,7 @@ static inline unsigned long do_csum(const unsigned char * buff, int len)
if (len <= 0) if (len <= 0)
goto out; goto out;
odd = 1 & (unsigned long) buff; odd = 1 & (unsigned long) buff;
if (odd) { if (unlikely(odd)) {
result = *buff << 8; result = *buff << 8;
len--; len--;
buff++; buff++;
...@@ -59,16 +60,15 @@ static inline unsigned long do_csum(const unsigned char * buff, int len) ...@@ -59,16 +60,15 @@ static inline unsigned long do_csum(const unsigned char * buff, int len)
} }
count >>= 1; /* nr of 64-bit words.. */ count >>= 1; /* nr of 64-bit words.. */
if (count) { if (count) {
unsigned long carry = 0; unsigned long zero = 0;
do { do {
unsigned long w = *(unsigned long *) buff; asm(" addq %1,%0\n"
" adcq %2,%0\n"
: "=r" (result)
: "m" (*buff), "r" (zero), "0" (result));
count--; count--;
buff += 8; buff += 8;
result += carry;
result += w;
carry = (w > result);
} while (count); } while (count);
result += carry;
result = (result & 0xffffffff) + (result >> 32); result = (result & 0xffffffff) + (result >> 32);
} }
if (len & 4) { if (len & 4) {
...@@ -84,8 +84,8 @@ static inline unsigned long do_csum(const unsigned char * buff, int len) ...@@ -84,8 +84,8 @@ static inline unsigned long do_csum(const unsigned char * buff, int len)
if (len & 1) if (len & 1)
result += *buff; result += *buff;
result = from64to16(result); result = from64to16(result);
if (odd) if (unlikely(odd))
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); return ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out: out:
return result; return result;
} }
...@@ -100,25 +100,27 @@ static inline unsigned long do_csum(const unsigned char * buff, int len) ...@@ -100,25 +100,27 @@ static inline unsigned long do_csum(const unsigned char * buff, int len)
* this function must be called with even lengths, except * this function must be called with even lengths, except
* for the last fragment, which may be odd * for the last fragment, which may be odd
* *
* it's best to have buff aligned on a 32-bit boundary * it's best to have buff aligned on a 64-bit boundary
*/ */
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
{ {
unsigned long result = do_csum(buff, len); unsigned result = do_csum(buff, len);
/* add in old sum, and carry.. */ /* add in old sum, and carry.. */
result += sum; asm("addl %1,%0\n\t"
/* 32+c bits -> 32 bits */ "adcl $0,%0" : "=r" (result) : "r" (sum), "0" (result));
result = (result & 0xffffffff) + (result >> 32);
return result; return result;
} }
//EXPORT_SYMBOL(csum_partial);
/* /*
* this routine is used for miscellaneous IP-like checksums, mainly * this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c * in icmp.c
*/ */
unsigned short ip_compute_csum(unsigned char * buff, int len) unsigned short ip_compute_csum(unsigned char * buff, int len)
{ {
return ~from64to16(do_csum(buff,len)); return ~csum_partial(buff,len,0);
} }
EXPORT_SYMBOL(ip_compute_csum);
/* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU Public License v.2
*
* Wrappers of assembly checksum functions for x86-64.
*/
#include <asm/checksum.h>
#include <linux/module.h>
/* Better way for this sought */
static inline unsigned from64to32(unsigned long x)
{
/* add up 32-bit words for 33 bits */
x = (x & 0xffffffff) + (x >> 32);
/* add up 16-bit and 17-bit words for 17+c bits */
x = (x & 0xffff) + (x >> 16);
/* add up 16-bit and 2-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
return x;
}
/**
* csum_partial_copy_from_user - Copy and checksum from user space.
* @src: source address (user space)
* @dst: destination address
* @len: number of bytes to be copied.
* @isum: initial sum that is added into the result (32bit unfolded)
* @errp: set to -EFAULT for an bad source address.
*
* Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits.
*/
unsigned int
csum_partial_copy_from_user(const char *src, char *dst,
int len, unsigned int isum, int *errp)
{
*errp = 0;
if (likely(access_ok(VERIFY_READ,src, len))) {
unsigned long sum;
sum = csum_partial_copy_generic(src,dst,len,isum,errp,NULL);
if (likely(*errp == 0))
return from64to32(sum);
}
*errp = -EFAULT;
memset(dst,0,len);
return 0;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
/**
* csum_partial_copy_to_user - Copy and checksum to user space.
* @src: source address
* @dst: destination address (user space)
* @len: number of bytes to be copied.
* @isum: initial sum that is added into the result (32bit unfolded)
* @errp: set to -EFAULT for an bad destination address.
*
* Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits.
*/
unsigned int
csum_partial_copy_to_user(const char *src, char *dst,
int len, unsigned int isum, int *errp)
{
if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
*errp = -EFAULT;
return 0;
}
*errp = 0;
return from64to32(csum_partial_copy_generic(src,dst,len,isum,NULL,errp));
}
EXPORT_SYMBOL(csum_partial_copy_to_user);
/**
* csum_partial_copy_nocheck - Copy and checksum.
* @src: source address
* @dst: destination address
* @len: number of bytes to be copied.
* @isum: initial sum that is added into the result (32bit unfolded)
*
* Returns an 32bit unfolded checksum of the buffer.
*/
unsigned int
csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
{
return from64to32(csum_partial_copy_generic(src,dst,len,sum,NULL,NULL));
}
//EXPORT_SYMBOL(csum_partial_copy_nocheck);
unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
__u32 len, unsigned short proto, unsigned int sum)
{
__u64 rest, sum64;
rest = (__u64)htonl(len) + (__u64)htons(proto) + (__u64)sum;
asm(" addq (%[saddr]),%[sum]\n"
" adcq 8(%[saddr]),%[sum]\n"
" adcq (%[daddr]),%[sum]\n"
" adcq 8(%[daddr]),%[sum]\n"
" adcq $0,%[sum]\n"
: [sum] "=r" (sum64)
: "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
return csum_fold(from64to32(sum64));
}
EXPORT_SYMBOL(csum_ipv6_magic);
#include <linux/string.h>
#include <asm/io.h>
#include <linux/module.h>
void *memcpy_toio(void *dst,void*src,unsigned len)
{
return __inline_memcpy(__io_virt(dst),src,len);
}
void *memcpy_fromio(void *dst,void*src,unsigned len)
{
return __inline_memcpy(dst,__io_virt(src),len);
}
EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
/* Copyright 2002 Andi Kleen */
/*
* memcpy - Copy a memory block.
*
* Input:
* rdi destination
* rsi source
* rdx count
*
* Output:
* rax original destination
*/
.globl __memcpy
.globl memcpy
.p2align
__memcpy:
memcpy:
pushq %rbx
prefetcht0 (%rsi) /*for more hopefully the hw prefetch will kick in*/
movq %rdi,%rax
movl %edi,%ecx
andl $7,%ecx
jnz bad_alignment
after_bad_alignment:
movq %rdx,%rcx
movl $64,%ebx
shrq $6,%rcx
jz handle_tail
loop_64:
/* no prefetch because we assume the hw prefetcher does it already
and we have no specific temporal hint to give. XXX or give a nta
hint for the source? */
movq (%rsi),%r11
movq 8(%rsi),%r8
movq 2*8(%rsi),%r9
movq 3*8(%rsi),%r10
movnti %r11,(%rdi)
movnti %r8,1*8(%rdi)
movnti %r9,2*8(%rdi)
movnti %r10,3*8(%rdi)
movq 4*8(%rsi),%r11
movq 5*8(%rsi),%r8
movq 6*8(%rsi),%r9
movq 7*8(%rsi),%r10
movnti %r11,4*8(%rdi)
movnti %r8,5*8(%rdi)
movnti %r9,6*8(%rdi)
movnti %r10,7*8(%rdi)
addq %rbx,%rsi
addq %rbx,%rdi
loop loop_64
handle_tail:
movl %edx,%ecx
andl $63,%ecx
shrl $3,%ecx
jz handle_7
movl $8,%ebx
loop_8:
movq (%rsi),%r8
movnti %r8,(%rdi)
addq %rbx,%rdi
addq %rbx,%rsi
loop loop_8
handle_7:
movl %edx,%ecx
andl $7,%ecx
jz ende
loop_1:
movb (%rsi),%r8b
movb %r8b,(%rdi)
incq %rdi
incq %rsi
loop loop_1
ende:
sfence
popq %rbx
ret
/* align destination */
/* This is simpleminded. For bigger blocks it may make sense to align
src and dst to their aligned subset and handle the rest separately */
bad_alignment:
movl $8,%r9d
subl %ecx,%r9d
movl %r9d,%ecx
subq %r9,%rdx
js small_alignment
jz small_alignment
align_1:
movb (%rsi),%r8b
movb %r8b,(%rdi)
incq %rdi
incq %rsi
loop align_1
jmp after_bad_alignment
small_alignment:
addq %r9,%rdx
jmp handle_7
/* Normally compiler builtins are used, but sometimes the compiler calls out
of line code. Based on asm-i386/string.h.
*/
#define _STRING_C
#include <linux/string.h>
#undef memmove
void *memmove(void * dest,const void *src,size_t count)
{
if (dest < src) {
__inline_memcpy(dest,src,count);
} else {
/* Could be more clever and move longs */
unsigned long d0, d1, d2;
__asm__ __volatile__(
"std\n\t"
"rep\n\t"
"movsb\n\t"
"cld"
: "=&c" (d0), "=&S" (d1), "=&D" (d2)
:"0" (count),
"1" (count-1+(const char *)src),
"2" (count-1+(char *)dest)
:"memory");
}
return dest;
}
/* Copyright 2002 Andi Kleen, SuSE Labs */
// #define FIX_ALIGNMENT 1
/*
* ISO C memset - set a memory block to a byte value.
*
* rdi destination
* rsi value (char)
* rdx count (bytes)
*
* rax original destination
*/
.globl ____memset
.p2align
____memset:
movq %rdi,%r10 /* save destination for return address */
movq %rdx,%r11 /* save count */
/* expand byte value */
movzbl %sil,%ecx /* zero extend char value */
movabs $0x0101010101010101,%rax /* expansion pattern */
mul %rcx /* expand with rax, clobbers rdx */
#ifdef FIX_ALIGNMENT
/* align dst */
movl %edi,%r9d
andl $7,%r9d /* test unaligned bits */
jnz bad_alignment
after_bad_alignment:
#endif
movq %r11,%rcx /* restore count */
shrq $6,%rcx /* divide by 64 */
jz handle_tail /* block smaller than 64 bytes? */
movl $64,%r8d /* CSE loop block size */
loop_64:
movnti %rax,0*8(%rdi)
movnti %rax,1*8(%rdi)
movnti %rax,2*8(%rdi)
movnti %rax,3*8(%rdi)
movnti %rax,4*8(%rdi)
movnti %rax,5*8(%rdi)
movnti %rax,6*8(%rdi)
movnti %rax,7*8(%rdi) /* clear 64 byte blocks */
addq %r8,%rdi /* increase pointer by 64 bytes */
loop loop_64 /* decrement rcx and if not zero loop */
/* Handle tail in loops. The loops should be faster than hard
to predict jump tables. */
handle_tail:
movl %r11d,%ecx
andl $63,%ecx
shrl $3,%ecx
jz handle_7
loop_8:
movnti %rax,(%rdi) /* long words */
addq $8,%rdi
loop loop_8
handle_7:
movl %r11d,%ecx
andl $7,%ecx
jz ende
loop_1:
movb %al,(%rdi) /* bytes */
incq %rdi
loop loop_1
ende:
movq %r10,%rax
ret
#ifdef FIX_ALIGNMENT
bad_alignment:
andq $-8,%r11 /* shorter than 8 bytes */
jz handle_7 /* if yes handle it in the tail code */
movnti %rax,(%rdi) /* unaligned store of 8 bytes */
movq $8,%r8
subq %r9,%r8 /* compute alignment (8-misalignment) */
addq %r8,%rdi /* fix destination */
subq %r8,%r11 /* fix count */
jmp after_bad_alignment
#endif
#include <linux/types.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/compiler.h>
#include <asm/i387.h>
#include <asm/hardirq.h>
#include <asm/page.h>
/*
* MMX 3DNow! library helper functions
*
* To do:
* We can use MMX just for prefetch in IRQ's. This may be a win.
* (reported so on K6-III)
* We should use a better code neutral filler for the short jump
* leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
* We also want to clobber the filler register so we dont get any
* register forwarding stalls on the filler.
*
* Add *user handling. Checksums are not a win with MMX on any CPU
* tested so far for any MMX solution figured.
*
* 22/09/2000 - Arjan van de Ven
* Improved for non-egineering-sample Athlons
*
* 2002 Andi Kleen. Some cleanups and changes for x86-64.
* Not really tuned yet. Using the Athlon version for now.
* This currenly uses MMX for 8 byte stores, but on hammer we could
* use integer 8 byte stores too and avoid the FPU save overhead.
* Disadvantage is that the integer load/stores have strong ordering
* model and may be slower.
*
* $Id$
*/
#ifdef MMX_MEMCPY_THRESH
void *_mmx_memcpy(void *to, const void *from, size_t len)
{
void *p;
int i;
p = to;
if (unlikely(in_interrupt()))
goto standard;
/* XXX: check if this is still memory bound with unaligned to/from.
if not align them here to 8bytes. */
i = len >> 6; /* len/64 */
kernel_fpu_begin();
__asm__ __volatile__ (
" prefetch (%0)\n" /* This set is 28 bytes */
" prefetch 64(%0)\n"
" prefetch 128(%0)\n"
" prefetch 192(%0)\n"
" prefetch 256(%0)\n"
"\n"
: : "r" (from) );
for(; i>5; i--)
{
__asm__ __volatile__ (
" prefetch 320(%0)\n"
" movq (%0), %%mm0\n"
" movq 8(%0), %%mm1\n"
" movq 16(%0), %%mm2\n"
" movq 24(%0), %%mm3\n"
" movq %%mm0, (%1)\n"
" movq %%mm1, 8(%1)\n"
" movq %%mm2, 16(%1)\n"
" movq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm0\n"
" movq 40(%0), %%mm1\n"
" movq 48(%0), %%mm2\n"
" movq 56(%0), %%mm3\n"
" movq %%mm0, 32(%1)\n"
" movq %%mm1, 40(%1)\n"
" movq %%mm2, 48(%1)\n"
" movq %%mm3, 56(%1)\n"
: : "r" (from), "r" (to) : "memory");
from+=64;
to+=64;
}
for(; i>0; i--)
{
__asm__ __volatile__ (
" movq (%0), %%mm0\n"
" movq 8(%0), %%mm1\n"
" movq 16(%0), %%mm2\n"
" movq 24(%0), %%mm3\n"
" movq %%mm0, (%1)\n"
" movq %%mm1, 8(%1)\n"
" movq %%mm2, 16(%1)\n"
" movq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm0\n"
" movq 40(%0), %%mm1\n"
" movq 48(%0), %%mm2\n"
" movq 56(%0), %%mm3\n"
" movq %%mm0, 32(%1)\n"
" movq %%mm1, 40(%1)\n"
" movq %%mm2, 48(%1)\n"
" movq %%mm3, 56(%1)\n"
: : "r" (from), "r" (to) : "memory");
from+=64;
to+=64;
}
len &= 63;
kernel_fpu_end();
/*
* Now do the tail of the block
*/
standard:
__inline_memcpy(to, from, len);
return p;
}
#endif
static inline void fast_clear_page(void *page)
{
int i;
kernel_fpu_begin();
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
);
for(i=0;i<4096/64;i++)
{
__asm__ __volatile__ (
" movntq %%mm0, (%0)\n"
" movntq %%mm0, 8(%0)\n"
" movntq %%mm0, 16(%0)\n"
" movntq %%mm0, 24(%0)\n"
" movntq %%mm0, 32(%0)\n"
" movntq %%mm0, 40(%0)\n"
" movntq %%mm0, 48(%0)\n"
" movntq %%mm0, 56(%0)\n"
: : "r" (page) : "memory");
page+=64;
}
/* since movntq is weakly-ordered, a "sfence" is needed to become
* ordered again.
*/
__asm__ __volatile__ (
" sfence \n" : :
);
kernel_fpu_end();
}
static inline void fast_copy_page(void *to, void *from)
{
int i;
kernel_fpu_begin();
/* maybe the prefetch stuff can go before the expensive fnsave...
* but that is for later. -AV
*/
__asm__ __volatile__ (
" prefetch (%0)\n"
" prefetch 64(%0)\n"
" prefetch 128(%0)\n"
" prefetch 192(%0)\n"
" prefetch 256(%0)\n"
: : "r" (from) );
for(i=0; i<(4096-320)/64; i++)
{
__asm__ __volatile__ (
" prefetch 320(%0)\n"
" movq (%0), %%mm0\n"
" movntq %%mm0, (%1)\n"
" movq 8(%0), %%mm1\n"
" movntq %%mm1, 8(%1)\n"
" movq 16(%0), %%mm2\n"
" movntq %%mm2, 16(%1)\n"
" movq 24(%0), %%mm3\n"
" movntq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm4\n"
" movntq %%mm4, 32(%1)\n"
" movq 40(%0), %%mm5\n"
" movntq %%mm5, 40(%1)\n"
" movq 48(%0), %%mm6\n"
" movntq %%mm6, 48(%1)\n"
" movq 56(%0), %%mm7\n"
" movntq %%mm7, 56(%1)\n"
: : "r" (from), "r" (to) : "memory");
from+=64;
to+=64;
}
for(i=(4096-320)/64; i<4096/64; i++)
{
__asm__ __volatile__ (
"2: movq (%0), %%mm0\n"
" movntq %%mm0, (%1)\n"
" movq 8(%0), %%mm1\n"
" movntq %%mm1, 8(%1)\n"
" movq 16(%0), %%mm2\n"
" movntq %%mm2, 16(%1)\n"
" movq 24(%0), %%mm3\n"
" movntq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm4\n"
" movntq %%mm4, 32(%1)\n"
" movq 40(%0), %%mm5\n"
" movntq %%mm5, 40(%1)\n"
" movq 48(%0), %%mm6\n"
" movntq %%mm6, 48(%1)\n"
" movq 56(%0), %%mm7\n"
" movntq %%mm7, 56(%1)\n"
: : "r" (from), "r" (to) : "memory");
from+=64;
to+=64;
}
/* since movntq is weakly-ordered, a "sfence" is needed to become
* ordered again.
*/
__asm__ __volatile__ (
" sfence \n" : :
);
kernel_fpu_end();
}
void mmx_clear_page(void * page)
{
#if 1
__builtin_memset(page,0,PAGE_SIZE);
#else
/* AK: these in_interrupt checks should not be needed. */
if(unlikely(in_interrupt()))
__builtin_memset(page,0,PAGE_SIZE);
else
fast_clear_page(page);
#endif
}
void mmx_copy_page(void *to, void *from)
{
#if 1
__builtin_memcpy(to,from,PAGE_SIZE);
#else
/* AK: these in_interrupt checks should not be needed. */
if(unlikely(in_interrupt()))
__builtin_memcpy(to,from,PAGE_SIZE);
else
fast_copy_page(to, from);
#endif
}
#include <asm/calling.h>
/*
* Save registers for the slow path of semaphores here to avoid
* disturbance of register allocation in fast paths with function calls.
* Written 2001 by Andi Kleen.
*/
.macro rwsem_thunk name,func
.globl \name
\name:
SAVE_ARGS
movq %rax,%rdi
call \func
jmp restore
.endm
rwsem_thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed
rwsem_thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed
rwsem_thunk rwsem_wake_thunk,rwsem_wake
/* This does not really belong here, but the macros are so
convenient. */
rwsem_thunk do_softirq_thunk,do_softirq
restore:
RESTORE_ARGS
ret
/* /*
* User address space access functions. * User address space access functions.
* The non inlined parts of asm-i386/uaccess.h are here.
* *
* Copyright 1997 Andi Kleen <ak@muc.de> * Copyright 1997 Andi Kleen <ak@muc.de>
* Copyright 1997 Linus Torvalds * Copyright 1997 Linus Torvalds
* Copyright 2002 Andi Kleen <ak@suse.de>
*/ */
#include <linux/config.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/mmx.h>
unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n))
__copy_user(to,from,n);
return n;
}
unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to,from,n);
else
memset(to, 0, n);
return n;
}
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
*/ */
#define __do_strncpy_from_user(dst,src,count,res) \ long __strncpy_from_user(char *dst, const char *src, long count)
do { \
long __d0, __d1, __d2; \
__asm__ __volatile__( \
" testq %1,%1\n" \
" jz 2f\n" \
"0: lodsb\n" \
" stosb\n" \
" testb %%al,%%al\n" \
" jz 1f\n" \
" decq %1\n" \
" jnz 0b\n" \
"1: subq %1,%0\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movq %5,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .quad 0b,3b\n" \
".previous" \
: "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
"=&D" (__d2) \
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
: "memory"); \
} while (0)
long
__strncpy_from_user(char *dst, const char *src, long count)
{ {
long res; long res;
__do_strncpy_from_user(dst, src, count, res); long __d0, __d1, __d2;
asm volatile( \
" testq %1,%1\n"
" jz 2f\n"
"0: lodsb\n"
" stosb\n"
" testb %%al,%%al\n"
" loopnz 0b\n"
"1: subq %1,%0\n"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movq %5,%0\n"
" jmp 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,3b\n"
".previous"
: "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),
"=&D" (__d2)
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst)
: "memory");
return res; return res;
} }
long long strncpy_from_user(char *dst, const char *src, long count)
strncpy_from_user(char *dst, const char *src, long count)
{ {
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1)) if (access_ok(VERIFY_READ, src, 1))
__do_strncpy_from_user(dst, src, count, res); return __strncpy_from_user(dst, src, count);
return res; return -EFAULT;
} }
/* /*
* Zero Userspace * Zero Userspace
*/ */
#define __do_clear_user(addr,size) \ unsigned long __clear_user(void *addr, unsigned long size)
do { \
long __d0; \
__asm__ __volatile__( \
"cld\n" \
"0: rep; stosl\n" \
" movq %2,%0\n" \
"1: rep; stosb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: lea 0(%2,%0,4),%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .quad 0b,3b\n" \
" .quad 1b,2b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0) \
: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
unsigned long
clear_user(void *to, unsigned long n)
{ {
if (access_ok(VERIFY_WRITE, to, n)) long __d0;
__do_clear_user(to, n); /* no memory constraint because it doesn't change any memory gcc knows
return n; about */
asm volatile(
" testq %[size8],%[size8]\n"
" jz 4f\n"
"0: movnti %[zero],(%[dst])\n"
" addq %[eight],%[dst]\n"
" loop 0b\n"
"4: movq %[size1],%%rcx\n"
" testl %%ecx,%%ecx\n"
" jz 2f\n"
"1: movb %b[zero],(%[dst])\n"
" incq %[dst]\n"
" loop 1b\n"
"2: sfence\n"
".section .fixup,\"ax\"\n"
"3: lea 0(%[size1],%[size8],8),%[size8]\n"
" jmp 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,3b\n"
" .quad 1b,2b\n"
".previous"
: [size8] "=c"(size), [dst] "=&D" (__d0)
: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst] "(addr),
[zero] "r" (0UL), [eight] "r" (8UL));
return size;
} }
unsigned long
__clear_user(void *to, unsigned long n) unsigned long clear_user(void *to, unsigned long n)
{ {
__do_clear_user(to, n); if (access_ok(VERIFY_WRITE, to, n))
return __clear_user(to, n);
return n; return n;
} }
......
/* /*
* linux/arch/i386/mm/extable.c * linux/arch/x86_64/mm/extable.c
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <linux/init.h>
extern const struct exception_table_entry __start___ex_table[]; extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[]; extern const struct exception_table_entry __stop___ex_table[];
static inline unsigned long
void __init exception_table_check(void)
{
const struct exception_table_entry *e;
unsigned long prev;
prev = 0;
for (e = __start___ex_table; e < __stop___ex_table; e++) {
if (e->insn < prev) {
panic("unordered exception table at %016lx:%016lx and %016lx:%016lx\n",
prev, e[-1].fixup,
e->insn, e->fixup);
}
prev = e->insn;
}
}
static unsigned long
search_one_table(const struct exception_table_entry *first, search_one_table(const struct exception_table_entry *first,
const struct exception_table_entry *last, const struct exception_table_entry *last,
unsigned long value) unsigned long value)
...@@ -41,8 +59,7 @@ search_exception_table(unsigned long addr) ...@@ -41,8 +59,7 @@ search_exception_table(unsigned long addr)
#ifndef CONFIG_MODULES #ifndef CONFIG_MODULES
/* There is only the kernel to search. */ /* There is only the kernel to search. */
ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr); return search_one_table(__start___ex_table, __stop___ex_table-1, addr);
return ret;
#else #else
/* The kernel is the last "module" -- no need to treat it special. */ /* The kernel is the last "module" -- no need to treat it special. */
struct module *mp; struct module *mp;
......
...@@ -85,7 +85,7 @@ void dump_pagetable(unsigned long address) ...@@ -85,7 +85,7 @@ void dump_pagetable(unsigned long address)
} }
int page_fault_trace; int page_fault_trace;
int exception_trace; int exception_trace = 1;
/* /*
* This routine handles page faults. It determines the address, * This routine handles page faults. It determines the address,
...@@ -311,11 +311,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) ...@@ -311,11 +311,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (!pte_present(*pte)) if (!pte_present(*pte))
goto bad_area_nosemaphore; goto bad_area_nosemaphore;
/* Strictly a flush_tlb_all because vmalloc is global, __flush_tlb_all();
but this only applies to new global pages so it should
not be needed. vmalloc will likely touch multiple ptes,
so do a full flush instead of a partial one. */
__flush_tlb();
return; return;
} }
} }
...@@ -153,6 +153,7 @@ static struct temp_map { ...@@ -153,6 +153,7 @@ static struct temp_map {
} temp_mappings[] __initdata = { } temp_mappings[] __initdata = {
{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) }, { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) }, { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
{ &temp_boot_pmds[1], (void *)(44UL * 1024 * 1024) },
{} {}
}; };
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
*/ */
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/init.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
...@@ -165,5 +166,5 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag ...@@ -165,5 +166,5 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
void iounmap(void *addr) void iounmap(void *addr)
{ {
if (addr > high_memory) if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long) addr)); vfree((void *) (PAGE_MASK & (unsigned long) addr));
} }
O_TARGET := pci.o
obj-y := x86-64.o
obj-$(CONFIG_PCI_DIRECT) += direct.o
obj-y += fixup.o
ifdef CONFIG_ACPI_PCI
obj-y += acpi.o
endif
obj-y += legacy.o
obj-y += irq.o common.o
include $(TOPDIR)/Rules.make
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include "pci.h"
static int __init pci_acpi_init(void)
{
if (pcibios_scanned)
return 0;
if (!(pci_probe & PCI_NO_ACPI_ROUTING)) {
if (!acpi_pci_irq_init()) {
printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
printk(KERN_INFO "PCI: if you experience problems, try using option 'pci=noacpi'\n");
pcibios_scanned++;
pcibios_enable_irq = acpi_pci_irq_enable;
} else
printk(KERN_WARNING "PCI: Invalid ACPI-PCI IRQ routing table\n");
}
return 0;
}
subsys_initcall(pci_acpi_init);
See arch/i386/pci/changelog for early changelog.
/*
* Low-Level PCI Support for PC
*
* (c) 1999--2000 Martin Mares <mj@ucw.cz>
Note: on x86-64 there is no PCI BIOS so there is no way to sort in the
same order as 32bit Linux. This could cause grief for dualbooting because
devices may wander. May want to use ACPI for sorting eventually.
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include "pci.h"
unsigned int pci_probe = PCI_PROBE_CONF1 | PCI_PROBE_CONF2;
int pcibios_last_bus = 0xfe; /* XXX */
struct pci_bus *pci_root_bus = NULL;
struct pci_ops *pci_root_ops = NULL;
int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value) = NULL;
int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value) = NULL;
/*
* legacy, numa, and acpi all want to call pcibios_scan_root
* from their initcalls. This flag prevents that.
*/
int pcibios_scanned;
/*
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
*/
spinlock_t pci_config_lock = SPIN_LOCK_UNLOCKED;
/*
* Several buggy motherboards address only 16 devices and mirror
* them to next 16 IDs. We try to detect this `feature' on all
* primary buses (those containing host bridges as they are
* expected to be unique) and remove the ghost devices.
*/
static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
{
struct list_head *ln, *mn;
struct pci_dev *d, *e;
int mirror = PCI_DEVFN(16,0);
int seen_host_bridge = 0;
int i;
DBG("PCI: Scanning for ghost devices on bus %d\n", b->number);
for (ln=b->devices.next; ln != &b->devices; ln=ln->next) {
d = pci_dev_b(ln);
if ((d->class >> 8) == PCI_CLASS_BRIDGE_HOST)
seen_host_bridge++;
for (mn=ln->next; mn != &b->devices; mn=mn->next) {
e = pci_dev_b(mn);
if (e->devfn != d->devfn + mirror ||
e->vendor != d->vendor ||
e->device != d->device ||
e->class != d->class)
continue;
for(i=0; i<PCI_NUM_RESOURCES; i++)
if (e->resource[i].start != d->resource[i].start ||
e->resource[i].end != d->resource[i].end ||
e->resource[i].flags != d->resource[i].flags)
continue;
break;
}
if (mn == &b->devices)
return;
}
if (!seen_host_bridge)
return;
printk(KERN_WARNING "PCI: Ignoring ghost devices on bus %02x\n", b->number);
ln = &b->devices;
while (ln->next != &b->devices) {
d = pci_dev_b(ln->next);
if (d->devfn >= mirror) {
list_del(&d->global_list);
list_del(&d->bus_list);
kfree(d);
} else
ln = ln->next;
}
}
/*
* Called after each bus is probed, but before its children
* are examined.
*/
void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
pcibios_fixup_ghosts(b);
pci_read_bridge_bases(b);
}
struct pci_bus * __devinit pcibios_scan_root(int busnum)
{
struct list_head *list;
struct pci_bus *bus;
list_for_each(list, &pci_root_buses) {
bus = pci_bus_b(list);
if (bus->number == busnum) {
/* Already scanned */
return bus;
}
}
printk("PCI: Probing PCI hardware (bus %02x)\n", busnum);
return pci_scan_bus(busnum, pci_root_ops, NULL);
}
static int __init pcibios_init(void)
{
if (!pci_root_ops) {
printk("PCI: System does not support PCI\n");
return 0;
}
pcibios_resource_survey();
/* may eventually need to do ACPI sort here. */
return 0;
}
subsys_initcall(pcibios_init);
char * __devinit pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
pci_probe = 0;
return NULL;
}
#ifdef CONFIG_PCI_DIRECT
else if (!strcmp(str, "conf1")) {
pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
return NULL;
}
else if (!strcmp(str, "conf2")) {
pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
return NULL;
}
#endif
#ifdef CONFIG_ACPI_PCI
else if (!strcmp(str, "noacpi")) {
pci_probe |= PCI_NO_ACPI_ROUTING;
return NULL;
}
#endif
else if (!strcmp(str, "rom")) {
pci_probe |= PCI_ASSIGN_ROMS;
return NULL;
} else if (!strcmp(str, "assign-busses")) {
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
return NULL;
} else if (!strcmp(str, "usepirqmask")) {
pci_probe |= PCI_USE_PIRQ_MASK;
return NULL;
} else if (!strncmp(str, "irqmask=", 8)) {
pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
return NULL;
} else if (!strncmp(str, "lastbus=", 8)) {
pcibios_last_bus = simple_strtol(str+8, NULL, 0);
return NULL;
}
return str;
}
unsigned int pcibios_assign_all_busses(void)
{
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}
int pcibios_enable_device(struct pci_dev *dev)
{
int err;
if ((err = pcibios_enable_resources(dev)) < 0)
return err;
return pcibios_enable_irq(dev);
}
/* /*
* Low-Level PCI Support for PC * direct.c - Low-level direct PCI config space access
*
* (c) 1999--2000 Martin Mares <mj@ucw.cz>
* 2001 Andi Kleen. Cleanup for x86-64. Removed PCI-BIOS access and fixups
* for hardware that is unlikely to exist on any Hammer platform.
*
* On x86-64 we don't have any access to the PCI-BIOS in long mode, so we
* cannot sort the pci device table based on what the BIOS did. This might
* change the probing order of some devices compared to an i386 kernel.
* May need to use ACPI to fix this.
*/ */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/ioport.h> #include "pci.h"
#include <asm/segment.h> /*
#include <asm/io.h> * Functions for accessing PCI configuration space with type 1 accesses
*/
#include "pci-x86_64.h" #define PCI_CONF1_ADDRESS(bus, dev, fn, reg) \
(0x80000000 | (bus << 16) | (dev << 11) | (fn << 8) | (reg & ~3))
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2; static int pci_conf1_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
{
unsigned long flags;
int pcibios_last_bus = -1; if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
struct pci_bus *pci_root_bus; return -EINVAL;
struct pci_ops *pci_root_ops;
/* spin_lock_irqsave(&pci_config_lock, flags);
* Direct access to PCI hardware...
*/
#ifdef CONFIG_PCI_DIRECT outl(PCI_CONF1_ADDRESS(bus, dev, fn, reg), 0xCF8);
switch (len) {
case 1:
*value = inb(0xCFC + (reg & 3));
break;
case 2:
*value = inw(0xCFC + (reg & 2));
break;
case 4:
*value = inl(0xCFC);
break;
}
spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
static int pci_conf1_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
{
unsigned long flags;
if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL;
spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, dev, fn, reg), 0xCF8);
switch (len) {
case 1:
outb((u8)value, 0xCFC + (reg & 3));
break;
case 2:
outw((u16)value, 0xCFC + (reg & 2));
break;
case 4:
outl((u32)value, 0xCFC);
break;
}
spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
/*
* Functions for accessing PCI configuration space with type 1 accesses
*/
#define CONFIG_CMD(dev, where) (0x80000000 | (dev->bus->number << 16) | (dev->devfn << 8) | (where & ~3)) #undef PCI_CONF1_ADDRESS
static int pci_conf1_read_config_byte(struct pci_dev *dev, int where, u8 *value) static int pci_conf1_read_config_byte(struct pci_dev *dev, int where, u8 *value)
{ {
outl(CONFIG_CMD(dev,where), 0xCF8); int result;
*value = inb(0xCFC + (where&3)); u32 data;
return PCIBIOS_SUCCESSFUL;
if (!value)
return -EINVAL;
result = pci_conf1_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 1, &data);
*value = (u8)data;
return result;
} }
static int pci_conf1_read_config_word(struct pci_dev *dev, int where, u16 *value) static int pci_conf1_read_config_word(struct pci_dev *dev, int where, u16 *value)
{ {
outl(CONFIG_CMD(dev,where), 0xCF8); int result;
*value = inw(0xCFC + (where&2)); u32 data;
return PCIBIOS_SUCCESSFUL;
if (!value)
return -EINVAL;
result = pci_conf1_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 2, &data);
*value = (u16)data;
return result;
} }
static int pci_conf1_read_config_dword(struct pci_dev *dev, int where, u32 *value) static int pci_conf1_read_config_dword(struct pci_dev *dev, int where, u32 *value)
{ {
outl(CONFIG_CMD(dev,where), 0xCF8); if (!value)
*value = inl(0xCFC); return -EINVAL;
return PCIBIOS_SUCCESSFUL;
return pci_conf1_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 4, value);
} }
static int pci_conf1_write_config_byte(struct pci_dev *dev, int where, u8 value) static int pci_conf1_write_config_byte(struct pci_dev *dev, int where, u8 value)
{ {
outl(CONFIG_CMD(dev,where), 0xCF8); return pci_conf1_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
outb(value, 0xCFC + (where&3)); PCI_FUNC(dev->devfn), where, 1, value);
return PCIBIOS_SUCCESSFUL;
} }
static int pci_conf1_write_config_word(struct pci_dev *dev, int where, u16 value) static int pci_conf1_write_config_word(struct pci_dev *dev, int where, u16 value)
{ {
outl(CONFIG_CMD(dev,where), 0xCF8); return pci_conf1_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
outw(value, 0xCFC + (where&2)); PCI_FUNC(dev->devfn), where, 2, value);
return PCIBIOS_SUCCESSFUL;
} }
static int pci_conf1_write_config_dword(struct pci_dev *dev, int where, u32 value) static int pci_conf1_write_config_dword(struct pci_dev *dev, int where, u32 value)
{ {
outl(CONFIG_CMD(dev,where), 0xCF8); return pci_conf1_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
outl(value, 0xCFC); PCI_FUNC(dev->devfn), where, 4, value);
return PCIBIOS_SUCCESSFUL;
} }
#undef CONFIG_CMD
static struct pci_ops pci_direct_conf1 = { static struct pci_ops pci_direct_conf1 = {
pci_conf1_read_config_byte, pci_conf1_read_config_byte,
pci_conf1_read_config_word, pci_conf1_read_config_word,
...@@ -95,68 +140,127 @@ static struct pci_ops pci_direct_conf1 = { ...@@ -95,68 +140,127 @@ static struct pci_ops pci_direct_conf1 = {
pci_conf1_write_config_dword pci_conf1_write_config_dword
}; };
/* /*
* Functions for accessing PCI configuration space with type 2 accesses * Functions for accessing PCI configuration space with type 2 accesses
*/ */
#define IOADDR(devfn, where) ((0xC000 | ((devfn & 0x78) << 5)) + where) #define PCI_CONF2_ADDRESS(dev, reg) (u16)(0xC000 | (dev << 8) | reg)
#define FUNC(devfn) (((devfn & 7) << 1) | 0xf0)
#define SET(dev) if (dev->devfn & 0x80) return PCIBIOS_DEVICE_NOT_FOUND; \
outb(FUNC(dev->devfn), 0xCF8); \
outb(dev->bus->number, 0xCFA);
static int pci_conf2_read_config_byte(struct pci_dev *dev, int where, u8 *value) static int pci_conf2_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
{ {
SET(dev); unsigned long flags;
*value = inb(IOADDR(dev->devfn,where));
if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL;
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
switch (len) {
case 1:
*value = inb(PCI_CONF2_ADDRESS(dev, reg));
break;
case 2:
*value = inw(PCI_CONF2_ADDRESS(dev, reg));
break;
case 4:
*value = inl(PCI_CONF2_ADDRESS(dev, reg));
break;
}
outb (0, 0xCF8); outb (0, 0xCF8);
return PCIBIOS_SUCCESSFUL;
spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
} }
static int pci_conf2_read_config_word(struct pci_dev *dev, int where, u16 *value) static int pci_conf2_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
{ {
SET(dev); unsigned long flags;
*value = inw(IOADDR(dev->devfn,where));
if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL;
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
switch (len) {
case 1:
outb ((u8)value, PCI_CONF2_ADDRESS(dev, reg));
break;
case 2:
outw ((u16)value, PCI_CONF2_ADDRESS(dev, reg));
break;
case 4:
outl ((u32)value, PCI_CONF2_ADDRESS(dev, reg));
break;
}
outb (0, 0xCF8); outb (0, 0xCF8);
return PCIBIOS_SUCCESSFUL;
spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
#undef PCI_CONF2_ADDRESS
static int pci_conf2_read_config_byte(struct pci_dev *dev, int where, u8 *value)
{
int result;
u32 data;
result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 1, &data);
*value = (u8)data;
return result;
}
static int pci_conf2_read_config_word(struct pci_dev *dev, int where, u16 *value)
{
int result;
u32 data;
result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 2, &data);
*value = (u16)data;
return result;
} }
static int pci_conf2_read_config_dword(struct pci_dev *dev, int where, u32 *value) static int pci_conf2_read_config_dword(struct pci_dev *dev, int where, u32 *value)
{ {
SET(dev); return pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
*value = inl (IOADDR(dev->devfn,where)); PCI_FUNC(dev->devfn), where, 4, value);
outb (0, 0xCF8);
return PCIBIOS_SUCCESSFUL;
} }
static int pci_conf2_write_config_byte(struct pci_dev *dev, int where, u8 value) static int pci_conf2_write_config_byte(struct pci_dev *dev, int where, u8 value)
{ {
SET(dev); return pci_conf2_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
outb (value, IOADDR(dev->devfn,where)); PCI_FUNC(dev->devfn), where, 1, value);
outb (0, 0xCF8);
return PCIBIOS_SUCCESSFUL;
} }
static int pci_conf2_write_config_word(struct pci_dev *dev, int where, u16 value) static int pci_conf2_write_config_word(struct pci_dev *dev, int where, u16 value)
{ {
SET(dev); return pci_conf2_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
outw (value, IOADDR(dev->devfn,where)); PCI_FUNC(dev->devfn), where, 2, value);
outb (0, 0xCF8);
return PCIBIOS_SUCCESSFUL;
} }
static int pci_conf2_write_config_dword(struct pci_dev *dev, int where, u32 value) static int pci_conf2_write_config_dword(struct pci_dev *dev, int where, u32 value)
{ {
SET(dev); return pci_conf2_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
outl (value, IOADDR(dev->devfn,where)); PCI_FUNC(dev->devfn), where, 4, value);
outb (0, 0xCF8);
return PCIBIOS_SUCCESSFUL;
} }
#undef SET
#undef IOADDR
#undef FUNC
static struct pci_ops pci_direct_conf2 = { static struct pci_ops pci_direct_conf2 = {
pci_conf2_read_config_byte, pci_conf2_read_config_byte,
pci_conf2_read_config_word, pci_conf2_read_config_word,
...@@ -166,6 +270,7 @@ static struct pci_ops pci_direct_conf2 = { ...@@ -166,6 +270,7 @@ static struct pci_ops pci_direct_conf2 = {
pci_conf2_write_config_dword pci_conf2_write_config_dword
}; };
/* /*
* Before we decide to use direct hardware access mechanisms, we try to do some * Before we decide to use direct hardware access mechanisms, we try to do some
* trivial checks to ensure it at least _seems_ to be working -- we just test * trivial checks to ensure it at least _seems_ to be working -- we just test
...@@ -214,8 +319,9 @@ static struct pci_ops * __devinit pci_check_direct(void) ...@@ -214,8 +319,9 @@ static struct pci_ops * __devinit pci_check_direct(void)
pci_sanity_check(&pci_direct_conf1)) { pci_sanity_check(&pci_direct_conf1)) {
outl (tmp, 0xCF8); outl (tmp, 0xCF8);
__restore_flags(flags); __restore_flags(flags);
printk("PCI: Using configuration type 1\n"); printk(KERN_INFO "PCI: Using configuration type 1\n");
request_region(0xCF8, 8, "PCI conf1"); if (!request_region(0xCF8, 8, "PCI conf1"))
return NULL;
return &pci_direct_conf1; return &pci_direct_conf1;
} }
outl (tmp, 0xCF8); outl (tmp, 0xCF8);
...@@ -231,8 +337,9 @@ static struct pci_ops * __devinit pci_check_direct(void) ...@@ -231,8 +337,9 @@ static struct pci_ops * __devinit pci_check_direct(void)
if (inb (0xCF8) == 0x00 && inb (0xCFA) == 0x00 && if (inb (0xCF8) == 0x00 && inb (0xCFA) == 0x00 &&
pci_sanity_check(&pci_direct_conf2)) { pci_sanity_check(&pci_direct_conf2)) {
__restore_flags(flags); __restore_flags(flags);
printk("PCI: Using configuration type 2\n"); printk(KERN_INFO "PCI: Using configuration type 2\n");
request_region(0xCF8, 4, "PCI conf2"); if (!request_region(0xCF8, 4, "PCI conf2"))
return NULL;
return &pci_direct_conf2; return &pci_direct_conf2;
} }
} }
...@@ -241,207 +348,20 @@ static struct pci_ops * __devinit pci_check_direct(void) ...@@ -241,207 +348,20 @@ static struct pci_ops * __devinit pci_check_direct(void)
return NULL; return NULL;
} }
#endif static int __init pci_direct_init(void)
/*
* Several buggy motherboards address only 16 devices and mirror
* them to next 16 IDs. We try to detect this `feature' on all
* primary buses (those containing host bridges as they are
* expected to be unique) and remove the ghost devices.
*/
static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
{
struct list_head *ln, *mn;
struct pci_dev *d, *e;
int mirror = PCI_DEVFN(16,0);
int seen_host_bridge = 0;
int i;
DBG("PCI: Scanning for ghost devices on bus %d\n", b->number);
for (ln=b->devices.next; ln != &b->devices; ln=ln->next) {
d = pci_dev_b(ln);
if ((d->class >> 8) == PCI_CLASS_BRIDGE_HOST)
seen_host_bridge++;
for (mn=ln->next; mn != &b->devices; mn=mn->next) {
e = pci_dev_b(mn);
if (e->devfn != d->devfn + mirror ||
e->vendor != d->vendor ||
e->device != d->device ||
e->class != d->class)
continue;
for(i=0; i<PCI_NUM_RESOURCES; i++)
if (e->resource[i].start != d->resource[i].start ||
e->resource[i].end != d->resource[i].end ||
e->resource[i].flags != d->resource[i].flags)
continue;
break;
}
if (mn == &b->devices)
return;
}
if (!seen_host_bridge)
return;
printk("PCI: Ignoring ghost devices on bus %02x\n", b->number);
ln = &b->devices;
while (ln->next != &b->devices) {
d = pci_dev_b(ln->next);
if (d->devfn >= mirror) {
list_del(&d->global_list);
list_del(&d->bus_list);
kfree(d);
} else
ln = ln->next;
}
}
/*
* Discover remaining PCI buses in case there are peer host bridges.
* We use the number of last PCI bus provided by the PCI BIOS.
*/
static void __devinit pcibios_fixup_peer_bridges(void)
{ {
int n; if ((pci_probe & (PCI_PROBE_CONF1 | PCI_PROBE_CONF2))
struct pci_bus bus; && (pci_root_ops = pci_check_direct())) {
struct pci_dev dev; if (pci_root_ops == &pci_direct_conf1) {
u16 l; pci_config_read = pci_conf1_read;
pci_config_write = pci_conf1_write;
if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
for (n=0; n <= pcibios_last_bus; n++) {
if (pci_bus_exists(&pci_root_buses, n))
continue;
bus.number = n;
bus.ops = pci_root_ops;
dev.bus = &bus;
for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
l != 0x0000 && l != 0xffff) {
DBG("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
printk("PCI: Discovered peer bus %02x\n", n);
pci_scan_bus(n, pci_root_ops, NULL);
break;
} }
}
}
struct pci_fixup pcibios_fixups[] = {
/* Currently no fixup for hammer systems. May need to readd them
as needed. */
{ 0 }
};
/*
* Called after each bus is probed, but before its children
* are examined.
*/
void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
pcibios_fixup_ghosts(b);
pci_read_bridge_bases(b);
}
/*
* Initialization. Try all known PCI access methods. Note that we support
* using both PCI BIOS and direct access: in such cases, we use I/O ports
* to access config space, but we still keep BIOS order of cards to be
* compatible with 2.0.X. This should go away some day.
*/
void __devinit pcibios_init(void)
{
struct pci_ops *bios = NULL;
struct pci_ops *dir = NULL;
#ifdef CONFIG_PCI_DIRECT
if (pci_probe & (PCI_PROBE_CONF1 | PCI_PROBE_CONF2))
dir = pci_check_direct();
#endif
if (dir)
pci_root_ops = dir;
else if (bios)
pci_root_ops = bios;
else { else {
printk("PCI: No PCI bus detected\n"); pci_config_read = pci_conf2_read;
return; pci_config_write = pci_conf2_write;
}
printk("PCI: Probing PCI hardware\n");
pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL);
pcibios_irq_init();
pcibios_fixup_peer_bridges();
pcibios_fixup_irqs();
pcibios_resource_survey();
}
char * __devinit pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
pci_probe = 0;
return NULL;
} }
else if (!strncmp(str, "bios", 4)) {
printk("PCI: No PCI bios access on x86-64. BIOS hint ignored.\n");
return NULL;
} else if (!strcmp(str, "nobios")) {
pci_probe &= ~PCI_PROBE_BIOS;
return NULL;
} else if (!strcmp(str, "nosort")) { /* Default */
pci_probe |= PCI_NO_SORT;
return NULL;
} }
#ifdef CONFIG_PCI_DIRECT
else if (!strcmp(str, "conf1")) {
pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
return NULL;
}
else if (!strcmp(str, "conf2")) {
pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
return NULL;
}
#endif
#ifdef CONFIG_ACPI_PCI
else if (!strcmp(str, "noacpi")) {
pci_probe |= PCI_NO_ACPI_ROUTING;
return NULL;
}
#endif
else if (!strcmp(str, "rom")) {
pci_probe |= PCI_ASSIGN_ROMS;
return NULL;
} else if (!strcmp(str, "assign-busses")) {
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
return NULL;
} else if (!strncmp(str, "irqmask=", 8)) {
pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
return NULL;
} else if (!strncmp(str, "lastbus=", 8)) {
pcibios_last_bus = simple_strtol(str+8, NULL, 0);
return NULL;
} else if (!strcmp(str, "usepirqmask")) {
pci_probe |= PCI_USE_PIRQ_MASK;
return NULL;
}
return str;
}
unsigned int pcibios_assign_all_busses(void)
{
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}
int pcibios_enable_device(struct pci_dev *dev)
{
int err;
if ((err = pcibios_enable_resources(dev)) < 0)
return err;
pcibios_enable_irq(dev);
return 0; return 0;
} }
arch_initcall(pci_direct_init);
/*
* Exceptions for specific devices. Usually work-arounds for fatal design flaws.
*
Short list on x86-64........so far.
*/
#include <linux/pci.h>
#include <linux/init.h>
#include "pci.h"
static void __devinit pci_fixup_ncr53c810(struct pci_dev *d)
{
/*
* NCR 53C810 returns class code 0 (at least on some systems).
* Fix class to be PCI_CLASS_STORAGE_SCSI
*/
if (!d->class) {
printk(KERN_WARNING "PCI: fixing NCR 53C810 class code for %s\n", d->slot_name);
d->class = PCI_CLASS_STORAGE_SCSI << 8;
}
}
static void __devinit pci_fixup_ide_bases(struct pci_dev *d)
{
int i;
/*
* PCI IDE controllers use non-standard I/O port decoding, respect it.
*/
if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
return;
DBG("PCI: IDE base address fixup for %s\n", d->slot_name);
for(i=0; i<4; i++) {
struct resource *r = &d->resource[i];
if ((r->start & ~0x80) == 0x374) {
r->start |= 2;
r->end = r->start;
}
}
}
static void __devinit pci_fixup_ide_trash(struct pci_dev *d)
{
int i;
/*
* There exist PCI IDE controllers which have utter garbage
* in first four base registers. Ignore that.
*/
DBG("PCI: IDE base address trash cleared for %s\n", d->slot_name);
for(i=0; i<4; i++)
d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
}
struct pci_fixup pcibios_fixups[] = {
{ PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810 },
{ 0 }
};
...@@ -16,12 +16,12 @@ ...@@ -16,12 +16,12 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/io_apic.h> #include <asm/io_apic.h>
#include "pci-x86_64.h" #include "pci.h"
#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
#define PIRQ_VERSION 0x0100 #define PIRQ_VERSION 0x0100
int pci_use_acpi_routing = 0; int broken_hp_bios_irq9;
static struct irq_routing_table *pirq_table; static struct irq_routing_table *pirq_table;
...@@ -44,6 +44,8 @@ struct irq_router { ...@@ -44,6 +44,8 @@ struct irq_router {
int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
}; };
int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
/* /*
* Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table. * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
*/ */
...@@ -77,9 +79,6 @@ static struct irq_routing_table * __init pirq_find_routing_table(void) ...@@ -77,9 +79,6 @@ static struct irq_routing_table * __init pirq_find_routing_table(void)
* If we have a IRQ routing table, use it to search for peer host * If we have a IRQ routing table, use it to search for peer host
* bridges. It's a gross hack, but since there are no other known * bridges. It's a gross hack, but since there are no other known
* ways how to get a list of buses, we have to go this way. * ways how to get a list of buses, we have to go this way.
*
* [maybe x86-64 architecture should define way to query this info in
more reasonable way?]
*/ */
static void __init pirq_peer_trick(void) static void __init pirq_peer_trick(void)
...@@ -117,7 +116,7 @@ static void __init pirq_peer_trick(void) ...@@ -117,7 +116,7 @@ static void __init pirq_peer_trick(void)
* Code for querying and setting of IRQ routes on various interrupt routers. * Code for querying and setting of IRQ routes on various interrupt routers.
*/ */
static void eisa_set_level_irq(unsigned int irq) void eisa_set_level_irq(unsigned int irq)
{ {
unsigned char mask = 1 << (irq & 7); unsigned char mask = 1 << (irq & 7);
unsigned int port = 0x4d0 + (irq >> 3); unsigned int port = 0x4d0 + (irq >> 3);
...@@ -176,24 +175,6 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i ...@@ -176,24 +175,6 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
return 0; return 0;
} }
/*
* The Intel PIIX4 pirq rules are fairly simple: "pirq" is
* just a pointer to the config space.
*/
static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
u8 x;
pci_read_config_byte(router, pirq, &x);
return (x < 16) ? x : 0;
}
static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
pci_write_config_byte(router, pirq, irq);
return 1;
}
/* /*
* The VIA pirq rules are nibble-based, like ALI, * The VIA pirq rules are nibble-based, like ALI,
* but without the ugly irq number munging. * but without the ugly irq number munging.
...@@ -209,35 +190,6 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i ...@@ -209,35 +190,6 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
return 1; return 1;
} }
/*
* OPTI: high four bits are nibble pointer..
* I wonder what the low bits do?
*/
static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
return read_config_nybble(router, 0xb8, pirq >> 4);
}
static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
write_config_nybble(router, 0xb8, pirq >> 4, irq);
return 1;
}
/*
* Cyrix: nibble offset 0x5C
*/
static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
return read_config_nybble(router, 0x5C, pirq-1);
}
static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
write_config_nybble(router, 0x5C, pirq-1, irq);
return 1;
}
/* /*
* PIRQ routing for SiS 85C503 router used in several SiS chipsets * PIRQ routing for SiS 85C503 router used in several SiS chipsets
* According to the SiS 5595 datasheet (preliminary V1.0, 12/24/1997) * According to the SiS 5595 datasheet (preliminary V1.0, 12/24/1997)
...@@ -344,57 +296,7 @@ static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i ...@@ -344,57 +296,7 @@ static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
return 1; return 1;
} }
/* #if 0 /* kept as reference */
* VLSI: nibble offset 0x74 - educated guess due to routing table and
* config space of VLSI 82C534 PCI-bridge/router (1004:0102)
* Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
* devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
* for the busbridge to the docking station.
*/
static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
if (pirq > 8) {
printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
return 0;
}
return read_config_nybble(router, 0x74, pirq-1);
}
static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
if (pirq > 8) {
printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
return 0;
}
write_config_nybble(router, 0x74, pirq-1, irq);
return 1;
}
/*
* ServerWorks: PCI interrupts mapped to system IRQ lines through Index
* and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
* format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
* register is a straight binary coding of desired PIC IRQ (low nibble).
*
* The 'link' value in the PIRQ table is already in the correct format
* for the Index register. There are some special index values:
* 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
* and 0x03 for SMBus.
*/
static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
outb_p(pirq, 0xc00);
return inb(0xc01) & 0xf;
}
static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
outb_p(pirq, 0xc00);
outb_p(irq, 0xc01);
return 1;
}
/* Support for AMD756 PCI IRQ Routing /* Support for AMD756 PCI IRQ Routing
* Jhon H. Caicedo <jhcaiced@osso.org.co> * Jhon H. Caicedo <jhcaiced@osso.org.co>
* Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced) * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
...@@ -402,6 +304,8 @@ static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int ...@@ -402,6 +304,8 @@ static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int
* The AMD756 pirq rules are nibble-based * The AMD756 pirq rules are nibble-based
* offset 0x56 0-3 PIRQA 4-7 PIRQB * offset 0x56 0-3 PIRQA 4-7 PIRQB
* offset 0x57 0-3 PIRQC 4-7 PIRQD * offset 0x57 0-3 PIRQC 4-7 PIRQD
*
* AMD8111 is similar NIY.
*/ */
static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq) static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{ {
...@@ -426,47 +330,13 @@ static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq ...@@ -426,47 +330,13 @@ static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq
} }
return 1; return 1;
} }
#ifdef CONFIG_PCI_BIOS
static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
struct pci_dev *bridge;
int pin = pci_get_interrupt_pin(dev, &bridge);
return pcibios_set_irq_routing(bridge, pin, irq);
}
static struct irq_router pirq_bios_router =
{ "BIOS", 0, 0, NULL, pirq_bios_set };
#endif #endif
static struct irq_router pirq_routers[] = { static struct irq_router pirq_routers[] = {
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, pirq_piix_get, pirq_piix_set },
{ "ALI", PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pirq_ali_get, pirq_ali_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, pirq_via_get, pirq_via_set }, { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, pirq_via_get, pirq_via_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, pirq_via_get, pirq_via_set }, { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, pirq_via_get, pirq_via_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, pirq_via_get, pirq_via_set }, { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, pirq_via_get, pirq_via_set },
{ "OPTI", PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C700, pirq_opti_get, pirq_opti_set },
{ "NatSemi", PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, pirq_cyrix_get, pirq_cyrix_set },
{ "SIS", PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, pirq_sis_get, pirq_sis_set }, { "SIS", PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, pirq_sis_get, pirq_sis_set },
{ "VLSI 82C534", PCI_VENDOR_ID_VLSI, PCI_DEVICE_ID_VLSI_82C534, pirq_vlsi_get, pirq_vlsi_set },
{ "ServerWorks", PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4,
pirq_serverworks_get, pirq_serverworks_set },
{ "ServerWorks", PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5,
pirq_serverworks_get, pirq_serverworks_set },
{ "AMD756 VIPER", PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B,
pirq_amd756_get, pirq_amd756_set },
{ "default", 0, 0, NULL, NULL } { "default", 0, 0, NULL, NULL }
}; };
...@@ -532,54 +402,6 @@ static void pcibios_test_irq_handler(int irq, void *dev_id, struct pt_regs *regs ...@@ -532,54 +402,6 @@ static void pcibios_test_irq_handler(int irq, void *dev_id, struct pt_regs *regs
{ {
} }
#ifdef CONFIG_ACPI_PCI
static int acpi_lookup_irq (
struct pci_dev *dev,
u8 pin,
int assign)
{
int result = 0;
int irq = 0;
/* TBD: Select IRQ from possible to improve routing performance. */
result = acpi_prt_get_irq(dev, pin, &irq);
if (!irq)
result = -ENODEV;
if (0 != result) {
printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s\n",
'A'+pin, dev->slot_name);
return result;
}
dev->irq = irq;
if (!assign) {
/* only check for the IRQ */
printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", irq,
dev->slot_name);
return 1;
}
/* also assign an IRQ */
if (irq && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
result = acpi_prt_set_irq(dev, pin, irq);
if (0 != result) {
printk(KERN_WARNING "PCI: Could not assign IRQ %d to device %s\n", irq, dev->slot_name);
return result;
}
eisa_set_level_irq(irq);
printk(KERN_INFO "PCI: Assigned IRQ %d for device %s\n", irq, dev->slot_name);
}
return 1;
}
#endif /* CONFIG_ACPI_PCI */
static int pcibios_lookup_irq(struct pci_dev *dev, int assign) static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
{ {
u8 pin; u8 pin;
...@@ -599,12 +421,6 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) ...@@ -599,12 +421,6 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
} }
pin = pin - 1; pin = pin - 1;
#ifdef CONFIG_ACPI_PCI
/* Use ACPI to lookup IRQ */
if (pci_use_acpi_routing)
return acpi_lookup_irq(dev, pin, assign);
#endif
/* Find IRQ routing entry */ /* Find IRQ routing entry */
if (!pirq_table) if (!pirq_table)
...@@ -625,6 +441,15 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) ...@@ -625,6 +441,15 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs); DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
mask &= pcibios_irq_mask; mask &= pcibios_irq_mask;
/* Work around broken HP Pavilion Notebooks which assign USB to
IRQ 9 even though it is actually wired to IRQ 11 */
if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
dev->irq = 11;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
r->set(pirq_router_dev, dev, pirq, 11);
}
/* /*
* Find the best IRQ to assign: use the one * Find the best IRQ to assign: use the one
* reported by the device if possible. * reported by the device if possible.
...@@ -703,28 +528,15 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) ...@@ -703,28 +528,15 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
return 1; return 1;
} }
void __init pcibios_irq_init(void) static int __init pcibios_irq_init(void)
{ {
DBG("PCI: IRQ init\n"); DBG("PCI: IRQ init\n");
#ifdef CONFIG_ACPI_PCI if (pcibios_enable_irq)
if (!(pci_probe & PCI_NO_ACPI_ROUTING)) { return 0;
if (acpi_prts.count) {
printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
pci_use_acpi_routing = 1;
return;
}
else
printk(KERN_WARNING "PCI: Invalid ACPI-PCI IRQ routing table\n");
}
#endif
pirq_table = pirq_find_routing_table(); pirq_table = pirq_find_routing_table();
#ifdef CONFIG_PCI_BIOS
if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
pirq_table = pcibios_get_irq_routing_table();
#endif
if (pirq_table) { if (pirq_table) {
pirq_peer_trick(); pirq_peer_trick();
pirq_find_router(); pirq_find_router();
...@@ -738,8 +550,15 @@ void __init pcibios_irq_init(void) ...@@ -738,8 +550,15 @@ void __init pcibios_irq_init(void)
if (io_apic_assign_pci_irqs) if (io_apic_assign_pci_irqs)
pirq_table = NULL; pirq_table = NULL;
} }
pcibios_enable_irq = pirq_enable_irq;
pcibios_fixup_irqs();
return 0;
} }
subsys_initcall(pcibios_irq_init);
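The conversion to an initcall means the IRQ setup now registers itself instead of being called from arch setup code. For reference, any PCI fixup that must run after the core PCI code can use the same pattern; a minimal hypothetical sketch:

	#include <linux/init.h>

	/* hypothetical example of the subsys_initcall() pattern used above */
	static int __init example_pci_fixup(void)
	{
		/* runs once at subsystem-init time, after the PCI core is up */
		return 0;	/* nonzero would signal failure */
	}
	subsys_initcall(example_pci_fixup);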
void __init pcibios_fixup_irqs(void) void __init pcibios_fixup_irqs(void)
{ {
struct pci_dev *dev; struct pci_dev *dev;
...@@ -815,7 +634,7 @@ void pcibios_penalize_isa_irq(int irq) ...@@ -815,7 +634,7 @@ void pcibios_penalize_isa_irq(int irq)
pirq_penalty[irq] += 100; pirq_penalty[irq] += 100;
} }
void pcibios_enable_irq(struct pci_dev *dev) int pirq_enable_irq(struct pci_dev *dev)
{ {
u8 pin; u8 pin;
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
...@@ -823,11 +642,11 @@ void pcibios_enable_irq(struct pci_dev *dev) ...@@ -823,11 +642,11 @@ void pcibios_enable_irq(struct pci_dev *dev)
char *msg; char *msg;
if (io_apic_assign_pci_irqs) if (io_apic_assign_pci_irqs)
msg = " Probably buggy MP table."; msg = " Probably buggy MP table.";
else if (pci_probe & PCI_BIOS_IRQ_SCAN)
msg = "";
else else
msg = " Please try using pci=biosirq."; msg = "";
printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
'A' + pin - 1, dev->slot_name, msg); 'A' + pin - 1, dev->slot_name, msg);
} }
return 0;
} }
/*
* legacy.c - traditional, old school PCI bus probing
*/
#include <linux/pci.h>
#include <linux/init.h>
#include "pci.h"
/*
* Discover remaining PCI buses in case there are peer host bridges.
* We use the number of the last PCI bus provided by the PCI BIOS.
*/
static void __devinit pcibios_fixup_peer_bridges(void)
{
int n;
struct pci_bus bus;
struct pci_dev dev;
u16 l;
if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
for (n=0; n <= pcibios_last_bus; n++) {
if (pci_bus_exists(&pci_root_buses, n))
continue;
bus.number = n;
bus.ops = pci_root_ops;
dev.bus = &bus;
for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
l != 0x0000 && l != 0xffff) {
DBG("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
pci_scan_bus(n, pci_root_ops, NULL);
break;
}
}
}
static int __init pci_legacy_init(void)
{
if (!pci_root_ops) {
printk("PCI: System does not support PCI\n");
return 0;
}
if (pcibios_scanned++)
return 0;
printk("PCI: Probing PCI hardware\n");
pci_root_bus = pcibios_scan_root(0);
pcibios_fixup_peer_bridges();
return 0;
}
subsys_initcall(pci_legacy_init);
/* /*
* Low-Level PCI Access for i386 machines. * Low-Level PCI Access for x86-64 machines.
* *
* (c) 1999 Martin Mares <mj@ucw.cz> * (c) 1999 Martin Mares <mj@ucw.cz>
*/ */
...@@ -26,8 +26,6 @@ ...@@ -26,8 +26,6 @@
extern unsigned int pci_probe; extern unsigned int pci_probe;
/* pci-i386.c */
extern unsigned int pcibios_max_latency; extern unsigned int pcibios_max_latency;
void pcibios_resource_survey(void); void pcibios_resource_survey(void);
...@@ -39,9 +37,6 @@ extern int pcibios_last_bus; ...@@ -39,9 +37,6 @@ extern int pcibios_last_bus;
extern struct pci_bus *pci_root_bus; extern struct pci_bus *pci_root_bus;
extern struct pci_ops *pci_root_ops; extern struct pci_ops *pci_root_ops;
struct irq_routing_table *pcibios_get_irq_routing_table(void);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
/* pci-irq.c */ /* pci-irq.c */
struct irq_info { struct irq_info {
...@@ -69,8 +64,10 @@ struct irq_routing_table { ...@@ -69,8 +64,10 @@ struct irq_routing_table {
extern unsigned int pcibios_irq_mask; extern unsigned int pcibios_irq_mask;
extern int pci_use_acpi_routing; extern int pcibios_scanned;
extern spinlock_t pci_config_lock;
void pcibios_irq_init(void);
void pcibios_fixup_irqs(void); void pcibios_fixup_irqs(void);
void pcibios_enable_irq(struct pci_dev *dev); int pirq_enable_irq(struct pci_dev *dev);
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
...@@ -22,67 +22,6 @@ ...@@ -22,67 +22,6 @@
* PCI to PCI Bridge Specification * PCI to PCI Bridge Specification
* PCI System Design Guide * PCI System Design Guide
* *
*
* CHANGELOG :
* Jun 17, 1994 : Modified to accommodate the broken pre-PCI BIOS SPECIFICATION
* Revision 2.0 present on <thys@dennis.ee.up.ac.za>'s ASUS mainboard.
*
* Jan 5, 1995 : Modified to probe PCI hardware at boot time by Frederic
* Potter, potter@cao-vlsi.ibp.fr
*
* Jan 10, 1995 : Modified to store the information about configured pci
* devices into a list, which can be accessed via /proc/pci by
* Curtis Varner, cvarner@cs.ucr.edu
*
* Jan 12, 1995 : CPU-PCI bridge optimization support by Frederic Potter.
* Alpha version. Intel & UMC chipset support only.
*
* Apr 16, 1995 : Source merge with the DEC Alpha PCI support. Most of the code
* moved to drivers/pci/pci.c.
*
* Dec 7, 1996 : Added support for direct configuration access of boards
* with Intel compatible access schemes (tsbogend@alpha.franken.de)
*
* Feb 3, 1997 : Set internal functions to static, save/restore flags
* avoid dead locks reading broken PCI BIOS, werner@suse.de
*
* Apr 26, 1997 : Fixed case when there is BIOS32, but not PCI BIOS
* (mj@atrey.karlin.mff.cuni.cz)
*
* May 7, 1997 : Added some missing cli()'s. [mj]
*
* Jun 20, 1997 : Corrected problems in "conf1" type accesses.
* (paubert@iram.es)
*
* Aug 2, 1997 : Split to PCI BIOS handling and direct PCI access parts
* and cleaned it up... Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*
* Feb 6, 1998 : No longer using BIOS to find devices and device classes. [mj]
*
* May 1, 1998 : Support for peer host bridges. [mj]
*
* Jun 19, 1998 : Changed to use spinlocks, so that PCI configuration space
* can be accessed from interrupts even on SMP systems. [mj]
*
* August 1998 : Better support for peer host bridges and more paranoid
* checks for direct hardware access. Ugh, this file starts to look as
* a large gallery of common hardware bug workarounds (watch the comments)
* -- the PCI specs themselves are sane, but most implementors should be
* hit hard with \hammer scaled \magstep5. [mj]
*
* Jan 23, 1999 : More improvements to peer host bridge logic. i450NX fixup. [mj]
*
* Feb 8, 1999 : Added UM8886BF I/O address fixup. [mj]
*
* August 1999 : New resource management and configuration access stuff. [mj]
*
* Sep 19, 1999 : Use PCI IRQ routing tables for detection of peer host bridges.
* Based on ideas by Chris Frantz and David Hinds. [mj]
*
* Sep 28, 1999 : Handle unreported/unassigned IRQs. Thanks to Shuu Yamaguchi
* for a lot of patience during testing. [mj]
*
* Oct 8, 1999 : Split to pci-i386.c, pci-pc.c and pci-visws.c. [mj]
*/ */
#include <linux/types.h> #include <linux/types.h>
...@@ -92,7 +31,7 @@ ...@@ -92,7 +31,7 @@
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/errno.h> #include <linux/errno.h>
#include "pci-x86_64.h" #include "pci.h"
void void
pcibios_update_resource(struct pci_dev *dev, struct resource *root, pcibios_update_resource(struct pci_dev *dev, struct resource *root,
......
...@@ -24,7 +24,6 @@ int main(void) ...@@ -24,7 +24,6 @@ int main(void)
output("#ifndef ASM_OFFSET_H\n"); output("#ifndef ASM_OFFSET_H\n");
output("#define ASM_OFFSET_H 1\n"); output("#define ASM_OFFSET_H 1\n");
// task struct entries needed by entry.S
#define ENTRY(entry) outconst("#define tsk_" #entry " %0", offsetof(struct task_struct, entry)) #define ENTRY(entry) outconst("#define tsk_" #entry " %0", offsetof(struct task_struct, entry))
ENTRY(state); ENTRY(state);
ENTRY(flags); ENTRY(flags);
...@@ -43,24 +42,8 @@ int main(void) ...@@ -43,24 +42,8 @@ int main(void)
ENTRY(irqcount); ENTRY(irqcount);
ENTRY(cpunumber); ENTRY(cpunumber);
ENTRY(irqstackptr); ENTRY(irqstackptr);
ENTRY(__softirq_pending);
ENTRY(__local_irq_count);
ENTRY(__local_bh_count);
ENTRY(__ksoftirqd_task);
ENTRY(level4_pgt);
ENTRY(me);
#undef ENTRY #undef ENTRY
output("#ifdef __ASSEMBLY__");
#define CONST(t) outconst("#define " #t " %0", t)
CONST(TASK_SIZE);
CONST(SIGCHLD);
CONST(CLONE_VFORK);
CONST(CLONE_VM);
#undef CONST
output("#endif");
output("#endif\n"); output("#endif\n");
return(0); return(0);
} }
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
*/ */
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64") OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64) OUTPUT_ARCH(i386:x86-64)
jiffies = jiffies_64;
ENTRY(_start) ENTRY(_start)
jiffies_64 = jiffies;
SECTIONS SECTIONS
{ {
. = 0xffffffff80100000; . = 0xffffffff80100000;
......
...@@ -41,15 +41,9 @@ static __inline__ void apic_wait_icr_idle(void) ...@@ -41,15 +41,9 @@ static __inline__ void apic_wait_icr_idle(void)
while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ); while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
} }
#ifdef CONFIG_X86_GOOD_APIC #define FORCE_READ_AROUND_WRITE 0
# define FORCE_READ_AROUND_WRITE 0 #define apic_read_around(x)
# define apic_read_around(x) #define apic_write_around(x,y) apic_write((x),(y))
# define apic_write_around(x,y) apic_write((x),(y))
#else
# define FORCE_READ_AROUND_WRITE 1
# define apic_read_around(x) apic_read(x)
# define apic_write_around(x,y) apic_write_atomic((x),(y))
#endif
static inline void ack_APIC_irq(void) static inline void ack_APIC_irq(void)
{ {
......
...@@ -475,6 +475,9 @@ static __inline__ int ffs(int x) ...@@ -475,6 +475,9 @@ static __inline__ int ffs(int x)
#define minix_find_first_zero_bit(addr,size) \ #define minix_find_first_zero_bit(addr,size) \
find_first_zero_bit((void*)addr,size) find_first_zero_bit((void*)addr,size)
/* find last set bit */
#define fls(x) generic_fls(x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _X86_64_BITOPS_H */ #endif /* _X86_64_BITOPS_H */
...@@ -18,6 +18,7 @@ extern char x86_boot_params[2048]; ...@@ -18,6 +18,7 @@ extern char x86_boot_params[2048];
#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
......
#ifndef _X86_64_CHECKSUM_H #ifndef _X86_64_CHECKSUM_H
#define _X86_64_CHECKSUM_H #define _X86_64_CHECKSUM_H
#include <linux/in6.h> /*
* Checksums for x86-64
* Copyright 2002 by Andi Kleen, SuSE Labs
* with some code from asm-i386/checksum.h
*/
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum
*
* Fold a 32bit running checksum to 16bit and invert it. This is usually
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*/
static inline unsigned int csum_fold(unsigned int sum)
{
__asm__(
" addl %1,%0\n"
" adcl $0xffff,%0"
: "=r" (sum)
: "r" (sum << 16), "0" (sum & 0xffff0000)
);
return (~sum) >> 16;
}
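For documentation purposes only, the asm above is equivalent to this portable C (an illustrative sketch, not part of the patch):

	static inline unsigned short csum_fold_ref(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold upper half into lower */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
		return (unsigned short)~sum;		/* invert for the wire format */
	}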
/* /*
* This is a version of ip_compute_csum() optimized for IP headers, * This is a version of ip_compute_csum() optimized for IP headers,
...@@ -10,28 +37,33 @@ ...@@ -10,28 +37,33 @@
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen. * Arnt Gulbrandsen.
*/ */
static inline unsigned short ip_fast_csum(unsigned char * iph,
unsigned int ihl) { /**
* ip_fast_csum - Compute the IPv4 header checksum efficiently.
* iph: ipv4 header
* ihl: length of header / 4
*/
static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
unsigned int sum; unsigned int sum;
__asm__ __volatile__( asm( " movl (%1), %0\n"
"\n movl (%1), %0" " subl $4, %2\n"
"\n subl $4, %2" " jbe 2f\n"
"\n jbe 2f" " addl 4(%1), %0\n"
"\n addl 4(%1), %0" " adcl 8(%1), %0\n"
"\n adcl 8(%1), %0" " adcl 12(%1), %0\n"
"\n adcl 12(%1), %0" "1: adcl 16(%1), %0\n"
"\n1: adcl 16(%1), %0" " lea 4(%1), %1\n"
"\n lea 4(%1), %1" " decl %2\n"
"\n decl %2" " jne 1b\n"
"\n jne 1b" " adcl $0, %0\n"
"\n adcl $0, %0" " movl %0, %2\n"
"\n movl %0, %2" " shrl $16, %0\n"
"\n shrl $16, %0" " addw %w2, %w0\n"
"\n addw %w2, %w0" " adcl $0, %0\n"
"\n adcl $0, %0" " notl %0\n"
"\n notl %0" "2:"
"\n2:"
/* Since the input registers which are loaded with iph and ihl /* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */ will assume they contain their original values. */
...@@ -40,120 +72,111 @@ static inline unsigned short ip_fast_csum(unsigned char * iph, ...@@ -40,120 +72,111 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
return(sum); return(sum);
} }
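Typical use verifies a received header in place; a valid IPv4 header checksums to zero. An illustrative sketch, assuming the usual struct iphdr layout:

	#include <linux/ip.h>

	static inline int ipv4_header_ok(struct iphdr *iph)
	{
		/* iph->ihl is the header length in 32bit words */
		return ip_fast_csum((unsigned char *)iph, iph->ihl) == 0;
	}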
/**
* csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
/* * @saddr: source address
* Fold a partial checksum. Note this works on a 32bit unfolded checksum. Make sure * @daddr: destination address
* to not mix with 64bit checksums! * @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the pseudo header checksum of the input data. Result is
* 32bit unfolded.
*/ */
static inline unsigned long
static inline unsigned int csum_fold(unsigned int sum) csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len,
unsigned short proto, unsigned int sum)
{ {
__asm__( asm(" addl %1, %0\n"
"\n addl %1,%0" " adcl %2, %0\n"
"\n adcl $0xffff,%0" " adcl %3, %0\n"
" adcl $0, %0\n"
: "=r" (sum) : "=r" (sum)
: "r" (sum << 16), "0" (sum & 0xffff0000) : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum));
);
return (~sum) >> 16;
}
static inline unsigned long csum_tcpudp_nofold(unsigned saddr,
unsigned daddr,
unsigned short len,
unsigned short proto,
unsigned int sum)
{
__asm__(
"\n addl %1, %0"
"\n adcl %2, %0"
"\n adcl %3, %0"
"\n adcl $0, %0"
: "=r" (sum)
: "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
return sum; return sum;
} }
/*
* computes the checksum of the TCP/UDP pseudo-header /**
* returns a 16-bit checksum, already complemented * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the 16bit pseudo header checksum of the input data, already
* complemented and ready to be filled in.
*/ */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, static inline unsigned short int
unsigned long daddr, csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short len, unsigned short proto, unsigned int sum)
unsigned short proto,
unsigned int sum)
{ {
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
} }
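Putting the helpers together: a sender sums the transport header and payload with csum_partial() (declared below) and folds the pseudo header in last. A hypothetical sketch; the helper name and the contiguous-buffer assumption are illustrative:

	#include <linux/ip.h>
	#include <linux/udp.h>
	#include <linux/in.h>

	/* udph points at a contiguous UDP header plus payload, 'len' bytes total */
	static unsigned short udp_checksum(struct iphdr *iph,
					   struct udphdr *udph, int len)
	{
		unsigned int sum = csum_partial((unsigned char *)udph, len, 0);
		return csum_tcpudp_magic(iph->saddr, iph->daddr, len,
					 IPPROTO_UDP, sum);
	}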
/**
/* * csum_partial - Compute an internet checksum.
* computes the checksum of a memory block at buff, length len, * @buff: buffer to be checksummed
* and adds in "sum" (32-bit) * @len: length of buffer.
* * @sum: initial sum to be added in (32bit unfolded)
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
* *
* it's best to have buff aligned on a 32-bit boundary * Returns the 32bit unfolded internet checksum of the buffer.
* Before filling it in, it needs to be csum_fold()'ed.
* buff should be aligned to a 64bit boundary if possible.
*/ */
extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); extern unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
/* #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
* the same as csum_partial, but copies from src while it #define HAVE_CSUM_COPY_USER 1
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int sum);
/*
* this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer.
*/
unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum); /* Do not call this directly. Use the wrappers below */
extern unsigned long csum_partial_copy_generic(const char *src, const char *dst,
unsigned len,
unsigned sum,
int *src_err_ptr, int *dst_err_ptr);
/* extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
* this routine is used for miscellaneous IP-like checksums, mainly int len, unsigned int isum, int *errp);
* in icmp.c extern unsigned int csum_partial_copy_to_user(const char *src, char *dst,
*/ int len, unsigned int isum, int *errp);
extern unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len,
unsigned int sum);
/* Old names. To be removed. */
#define csum_and_copy_to_user csum_partial_copy_to_user
#define csum_and_copy_from_user csum_partial_copy_from_user
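The user-space variants checksum and copy in a single pass and report faults through *errp; as the removed comment further down notes, the rest of the destination buffer is zeroed on a fault. A usage sketch (caller context and helper name assumed):

	#include <linux/errno.h>

	static int fetch_and_sum(char *dst, const char *user_src, int len,
				 unsigned int *sump)
	{
		int err = 0;

		*sump = csum_partial_copy_from_user(user_src, dst, len, 0, &err);
		if (err)
			return -EFAULT;	/* remainder of dst was zero-filled */
		return 0;
	}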
/**
* ip_compute_csum - Compute a 16bit IP checksum.
* @buff: buffer address.
* @len: length of buffer.
*
* Returns the 16bit folded/inverted checksum of the passed buffer.
* Ready to fill in.
*/
extern unsigned short ip_compute_csum(unsigned char * buff, int len); extern unsigned short ip_compute_csum(unsigned char * buff, int len);
#define _HAVE_ARCH_IPV6_CSUM /**
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
struct in6_addr *daddr, * @saddr: source address
__u32 len, * @daddr: destination address
unsigned short proto, * @len: length of packet
unsigned int sum) * @proto: protocol of packet
{ * @sum: initial sum (32bit unfolded) to be added in
__asm__( *
"\n addl 0(%1), %0" * Computes an IPv6 pseudo header checksum. This sum is added the checksum
"\n adcl 4(%1), %0" * into UDP/TCP packets and contains some link layer information.
"\n adcl 8(%1), %0" * Returns the unfolded 32bit checksum.
"\n adcl 12(%1), %0" */
"\n adcl 0(%2), %0"
"\n adcl 4(%2), %0" struct in6_addr;
"\n adcl 8(%2), %0"
"\n adcl 12(%2), %0" #define _HAVE_ARCH_IPV6_CSUM 1
"\n adcl %3, %0" extern unsigned short
"\n adcl %4, %0" csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
"\n adcl $0, %0" __u32 len, unsigned short proto, unsigned int sum);
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
return csum_fold(sum);
}
#endif #endif
...@@ -7,9 +7,6 @@ ...@@ -7,9 +7,6 @@
#ifndef __ASM_X8664_CPUFEATURE_H #ifndef __ASM_X8664_CPUFEATURE_H
#define __ASM_X8664_CPUFEATURE_H #define __ASM_X8664_CPUFEATURE_H
/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
#define CPU_FEATURE_P(CAP, FEATURE) test_bit(CAP, X86_FEATURE_##FEATURE ##_BIT)
#define NCAPINTS 4 /* Currently we have 4 32-bit words worth of info */ #define NCAPINTS 4 /* Currently we have 4 32-bit words worth of info */
/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */ /* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
...@@ -63,11 +60,26 @@ ...@@ -63,11 +60,26 @@
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
#endif /* __ASM_X8664_CPUFEATURE_H */ #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
/* #define cpu_has_fpu 1
* Local Variables: #define cpu_has_vme 0
* mode:c #define cpu_has_de 1
* comment-column:42 #define cpu_has_pse 1
* End: #define cpu_has_tsc 1
*/ #define cpu_has_pae ___BUG___
#define cpu_has_pge 1
#define cpu_has_apic 1
#define cpu_has_mtrr 1
#define cpu_has_mmx 1
#define cpu_has_fxsr 1
#define cpu_has_xmm 1
#define cpu_has_ht 0 /* you need to report the support from i386. sorry */
#define cpu_has_mp 1 /* XXX */
#define cpu_has_k6_mtrr 0
#define cpu_has_cyrix_arr 0
#define cpu_has_centaur_mcr 0
#endif /* __ASM_X8664_CPUFEATURE_H */
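Generic code can now test a feature either at run time through the capability words or, for the constants above, at compile time where the branch folds away. A hypothetical sketch:

	#include <linux/kernel.h>	/* printk */

	static void report_features(struct cpuinfo_x86 *c)
	{
		if (cpu_has(c, X86_FEATURE_MMX))	/* runtime bit test */
			printk("MMX present\n");
		if (cpu_has_fxsr)			/* constant 1 on x86-64 */
			printk("FXSR present\n");
	}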
...@@ -141,17 +141,17 @@ static inline void set_ldt_desc(unsigned n, void *addr, int size) ...@@ -141,17 +141,17 @@ static inline void set_ldt_desc(unsigned n, void *addr, int size)
/* /*
* load one particular LDT into the current CPU * load one particular LDT into the current CPU
*/ */
extern inline void load_LDT (struct mm_struct *mm) extern inline void load_LDT (mm_context_t *pc)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
void *segments = mm->context.segments; int count = pc->size;
if (!segments) { if (!count) {
clear_LDT(cpu); clear_LDT(cpu);
return; return;
} }
set_ldt_desc(cpu, segments, LDT_ENTRIES); set_ldt_desc(cpu, pc->ldt, count);
__load_LDT(cpu); __load_LDT(cpu);
} }
......
...@@ -140,6 +140,7 @@ static inline void empty_fpu(struct task_struct *child) ...@@ -140,6 +140,7 @@ static inline void empty_fpu(struct task_struct *child)
{ {
if (!child->used_math) { if (!child->used_math) {
/* Simulate an empty FPU. */ /* Simulate an empty FPU. */
memset(&child->thread.i387.fxsave,0,sizeof(struct i387_fxsave_struct));
child->thread.i387.fxsave.cwd = 0x037f; child->thread.i387.fxsave.cwd = 0x037f;
child->thread.i387.fxsave.swd = 0; child->thread.i387.fxsave.swd = 0;
child->thread.i387.fxsave.twd = 0; child->thread.i387.fxsave.twd = 0;
......
...@@ -193,9 +193,9 @@ extern void iounmap(void *addr); ...@@ -193,9 +193,9 @@ extern void iounmap(void *addr);
#define __raw_writew writew #define __raw_writew writew
#define __raw_writel writel #define __raw_writel writel
void *memcpy_fromio(void*,void*,unsigned);
void *memcpy_toio(void*,void*,unsigned);
#define memset_io(a,b,c) memset(__io_virt(a),(b),(c)) #define memset_io(a,b,c) memset(__io_virt(a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c))
#define memcpy_toio(a,b,c) memcpy(__io_virt(a),(b),(c))
/* /*
* ISA space is 'always mapped' on a typical x86 system, no need to * ISA space is 'always mapped' on a typical x86 system, no need to
......
...@@ -67,6 +67,7 @@ ...@@ -67,6 +67,7 @@
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460
/* Used for packet mode */ /* Used for packet mode */
#define TIOCPKT_DATA 0 #define TIOCPKT_DATA 0
......
#ifndef __x86_64_MMU_H #ifndef __x86_64_MMU_H
#define __x86_64_MMU_H #define __x86_64_MMU_H
#include <linux/spinlock.h>
/* /*
* The x86_64 doesn't have a mmu context, but * The x86_64 doesn't have a mmu context, but
* we put the segment information here. * we put the segment information here.
*
* cpu_vm_mask is used to optimize ldt flushing.
*/ */
typedef struct { typedef struct {
void *segments; void *ldt;
unsigned long cpuvalid; rwlock_t ldtlock;
int size;
struct semaphore sem;
} mm_context_t; } mm_context_t;
#endif #endif
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
* possibly do the LDT unload here? * possibly do the LDT unload here?
*/ */
#define destroy_context(mm) do { } while(0) #define destroy_context(mm) do { } while(0)
#define init_new_context(tsk,mm) 0 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -34,20 +34,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -34,20 +34,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (likely(prev != next)) { if (likely(prev != next)) {
/* stop flush ipis for the previous mm */ /* stop flush ipis for the previous mm */
clear_bit(cpu, &prev->cpu_vm_mask); clear_bit(cpu, &prev->cpu_vm_mask);
/*
* Re-load LDT if necessary
*/
if (unlikely(prev->context.segments != next->context.segments))
load_LDT(next);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
cpu_tlbstate[cpu].state = TLBSTATE_OK; cpu_tlbstate[cpu].state = TLBSTATE_OK;
cpu_tlbstate[cpu].active_mm = next; cpu_tlbstate[cpu].active_mm = next;
#endif #endif
set_bit(cpu, &next->cpu_vm_mask); set_bit(cpu, &next->cpu_vm_mask);
set_bit(cpu, &next->context.cpuvalid);
/* Re-load page tables */ /* Re-load page tables */
*read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE; *read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE;
__flush_tlb(); __flush_tlb();
if (next->context.size + prev->context.size)
load_LDT(&next->context);
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
else { else {
...@@ -59,9 +56,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -59,9 +56,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* tlb flush IPI delivery. We must flush our tlb. * tlb flush IPI delivery. We must flush our tlb.
*/ */
local_flush_tlb(); local_flush_tlb();
load_LDT(&next->context);
} }
if (!test_and_set_bit(cpu, &next->context.cpuvalid))
load_LDT(next);
} }
#endif #endif
} }
......
...@@ -9,23 +9,18 @@ ...@@ -9,23 +9,18 @@
#define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif #endif
#define PAGE_MASK (~(PAGE_SIZE-1)) #define PAGE_MASK (~(PAGE_SIZE-1))
#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
#define THREAD_SIZE (2*PAGE_SIZE) #define THREAD_SIZE (2*PAGE_SIZE)
#define CURRENT_MASK (~(THREAD_SIZE-1)) #define CURRENT_MASK (~(THREAD_SIZE-1))
#ifdef __KERNEL__ #ifdef __KERNEL__
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#if 0 void clear_page(void *);
#include <asm/mmx.h> void copy_page(void *, void *);
#define clear_page(page) mmx_clear_page((void *)(page))
#define copy_page(to,from) mmx_copy_page(to,from)
#else
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
#endif
#define clear_user_page(page, vaddr) clear_page(page) #define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr) copy_page(to, from) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/* /*
* These are used to make use of C type-checking.. * These are used to make use of C type-checking..
...@@ -34,7 +29,7 @@ typedef struct { unsigned long pte; } pte_t; ...@@ -34,7 +29,7 @@ typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pml4; } pml4_t; typedef struct { unsigned long pml4; } pml4_t;
#define PTE_MASK PAGE_MASK #define PTE_MASK PHYSICAL_PAGE_MASK
typedef struct { unsigned long pgprot; } pgprot_t; typedef struct { unsigned long pgprot; } pgprot_t;
...@@ -59,9 +54,12 @@ typedef struct { unsigned long pgprot; } pgprot_t; ...@@ -59,9 +54,12 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define __START_KERNEL 0xffffffff80100000 #define __START_KERNEL 0xffffffff80100000
#define __START_KERNEL_map 0xffffffff80000000 #define __START_KERNEL_map 0xffffffff80000000
#define __PAGE_OFFSET 0x0000010000000000 #define __PAGE_OFFSET 0x0000010000000000
#define __PHYSICAL_MASK 0x000000ffffffffff
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/stringify.h>
/* /*
* Tell the user there is some problem. The exception handler decodes this frame. * Tell the user there is some problem. The exception handler decodes this frame.
*/ */
...@@ -70,7 +68,9 @@ struct bug_frame { ...@@ -70,7 +68,9 @@ struct bug_frame {
char *filename; /* should use 32bit offset instead, but the assembler doesn't like it */ char *filename; /* should use 32bit offset instead, but the assembler doesn't like it */
unsigned short line; unsigned short line;
} __attribute__((packed)); } __attribute__((packed));
#define BUG() asm volatile("ud2 ; .quad %c1 ; .short %c0" :: "i"(__LINE__), "i" (__FILE__)) #define BUG() \
asm volatile("ud2 ; .quad %c1 ; .short %c0" :: \
"i"(__LINE__), "i" (__stringify(KBUILD_BASENAME)))
#define PAGE_BUG(page) BUG() #define PAGE_BUG(page) BUG()
void out_of_line_bug(void); void out_of_line_bug(void);
...@@ -103,8 +103,11 @@ extern __inline__ int get_order(unsigned long size) ...@@ -103,8 +103,11 @@ extern __inline__ int get_order(unsigned long size)
__pa(v); }) __pa(v); })
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT)) #define pfn_to_page(pfn) (mem_map + (pfn))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) #define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
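The new pfn helpers make the kernel-address/page relation explicit, and the conversions round-trip by construction. An illustrative sketch (the helper name is hypothetical):

	static void check_page_round_trip(void *kaddr)
	{
		unsigned long pfn = __pa(kaddr) >> PAGE_SHIFT;
		struct page *page = pfn_to_page(pfn);	/* == virt_to_page(kaddr) */

		if (page_to_pfn(page) != pfn || !virt_addr_valid(kaddr))
			BUG();
	}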
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#define __x8664_PCI_H #define __x8664_PCI_H
#include <linux/config.h> #include <linux/config.h>
#include <asm/io.h>
#ifdef __KERNEL__ #ifdef __KERNEL__
...@@ -20,17 +19,19 @@ extern unsigned long pci_mem_start; ...@@ -20,17 +19,19 @@ extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM (pci_mem_start) #define PCIBIOS_MIN_MEM (pci_mem_start)
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
void pcibios_set_master(struct pci_dev *dev); void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq); void pcibios_penalize_isa_irq(int irq);
struct irq_routing_table *pcibios_get_irq_routing_table(void); struct irq_routing_table *pcibios_get_irq_routing_table(void);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
/* Dynamic DMA mapping stuff.
* x8664 has everything mapped statically.
*/
#include <linux/types.h> #include <linux/types.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/io.h> #include <asm/io.h>
......
...@@ -2,19 +2,17 @@ ...@@ -2,19 +2,17 @@
#define X86_64_PDA_H #define X86_64_PDA_H
#include <linux/stddef.h> #include <linux/stddef.h>
#ifndef ASM_OFFSET_H #include <linux/types.h>
#include <asm/offset.h>
#endif
#include <linux/cache.h> #include <linux/cache.h>
/* Per processor datastructure. %gs points to it while the kernel runs */ /* Per processor datastructure. %gs points to it while the kernel runs */
/* To use a new field with the *_pda macros it needs to be added to tools/offset.c */
struct x8664_pda { struct x8664_pda {
struct x8664_pda *me; struct task_struct *pcurrent; /* Current process */
unsigned long cpudata_offset;
struct x8664_pda *me; /* Pointer to itself */
unsigned long kernelstack; /* TOS for current process */ unsigned long kernelstack; /* TOS for current process */
unsigned long oldrsp; /* user rsp for system call */ unsigned long oldrsp; /* user rsp for system call */
unsigned long irqrsp; /* Old rsp for interrupts. */ unsigned long irqrsp; /* Old rsp for interrupts. */
struct task_struct *pcurrent; /* Current process */
int irqcount; /* Irq nesting counter. Starts with -1 */ int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */ int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */ char *irqstackptr; /* top of irqstack */
...@@ -39,18 +37,19 @@ extern struct x8664_pda cpu_pda[]; ...@@ -39,18 +37,19 @@ extern struct x8664_pda cpu_pda[];
*/ */
#define sizeof_field(type,field) (sizeof(((type *)0)->field)) #define sizeof_field(type,field) (sizeof(((type *)0)->field))
#define typeof_field(type,field) typeof(((type *)0)->field) #define typeof_field(type,field) typeof(((type *)0)->field)
#ifndef __STR
#define __STR(x) #x
#endif
#define __STR2(x) __STR(x)
extern void __bad_pda_field(void); extern void __bad_pda_field(void);
#define pda_offset(field) offsetof(struct x8664_pda, field)
#define pda_to_op(op,field,val) do { \ #define pda_to_op(op,field,val) do { \
switch (sizeof_field(struct x8664_pda, field)) { \ switch (sizeof_field(struct x8664_pda, field)) { \
case 2: asm volatile(op "w %0,%%gs:" __STR2(pda_ ## field) ::"r" (val):"memory"); break; \ case 2: \
case 4: asm volatile(op "l %0,%%gs:" __STR2(pda_ ## field) ::"r" (val):"memory"); break; \ asm volatile(op "w %0,%%gs:%c1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
case 8: asm volatile(op "q %0,%%gs:" __STR2(pda_ ## field) ::"r" (val):"memory"); break; \ case 4: \
asm volatile(op "l %0,%%gs:%c1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
case 8: \
asm volatile(op "q %0,%%gs:%c1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
default: __bad_pda_field(); \ default: __bad_pda_field(); \
} \ } \
} while (0) } while (0)
...@@ -59,9 +58,12 @@ extern void __bad_pda_field(void); ...@@ -59,9 +58,12 @@ extern void __bad_pda_field(void);
#define pda_from_op(op,field) ({ \ #define pda_from_op(op,field) ({ \
typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \ typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
switch (sizeof_field(struct x8664_pda, field)) { \ switch (sizeof_field(struct x8664_pda, field)) { \
case 2: asm volatile(op "w %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__)::"memory"); break; \ case 2: \
case 4: asm volatile(op "l %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__)::"memory"); break; \ asm volatile(op "w %%gs:%c1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
case 8: asm volatile(op "q %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__)::"memory"); break; \ case 4: \
asm volatile(op "l %%gs:%c1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
case 8: \
asm volatile(op "q %%gs:%c1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
default: __bad_pda_field(); \ default: __bad_pda_field(); \
} \ } \
ret__; }) ret__; })
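With the field offset now an immediate computed from offsetof(), each PDA access compiles to a single %gs-relative instruction; e.g., through the read_pda() wrapper built on pda_from_op() (sketch, helper name illustrative):

	static inline struct task_struct *example_current(void)
	{
		/* roughly: movq %gs:<offsetof(struct x8664_pda, pcurrent)>,%rax */
		return read_pda(pcurrent);
	}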
......
#ifndef __ARCH_I386_PERCPU__ #ifndef __ARCH_X8664_PERCPU__
#define __ARCH_I386_PERCPU__ #define __ARCH_X8664_PERCPU__
#include <asm-generic/percpu.h> #include <asm/pda.h>
#endif /* __ARCH_I386_PERCPU__ */ /* var lives in the discarded per-CPU region; add the offset of the copy we want */
#define this_cpu(var) (*RELOC_HIDE(&var, read_pda(cpudata_offset)))
#define per_cpu(var, cpu) (*RELOC_HIDE(&var, per_cpu_pda[cpu]))
void setup_per_cpu_areas(void);
#endif /* __ARCH_X8664_PERCPU__ */
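Assuming a variable placed in the discarded per-CPU section by the generic declaration machinery (not shown in this patch), access then adds the per-CPU offset kept in the PDA. A hypothetical sketch; the __per_cpu_data attribute is assumed from the generic headers:

	/* hypothetical per-CPU counter */
	static unsigned long nr_events __per_cpu_data;

	static void count_event(void)
	{
		this_cpu(nr_events)++;	/* this CPU's copy */
	}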
...@@ -75,5 +75,7 @@ extern inline void pte_free(struct page *pte) ...@@ -75,5 +75,7 @@ extern inline void pte_free(struct page *pte)
__free_page(pte); __free_page(pte);
} }
#define pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#define pmd_free_tlb(tlb,x) do { } while (0)
#endif /* _X86_64_PGALLOC_H */ #endif /* _X86_64_PGALLOC_H */
...@@ -65,6 +65,7 @@ extern unsigned long empty_zero_page[1024]; ...@@ -65,6 +65,7 @@ extern unsigned long empty_zero_page[1024];
#define pgd_ERROR(e) \ #define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
#define pml4_none(x) (!pml4_val(x)) #define pml4_none(x) (!pml4_val(x))
#define pgd_none(x) (!pgd_val(x)) #define pgd_none(x) (!pgd_val(x))
...@@ -98,13 +99,8 @@ static inline void set_pml4(pml4_t *dst, pml4_t val) ...@@ -98,13 +99,8 @@ static inline void set_pml4(pml4_t *dst, pml4_t val)
#define pgd_page(pgd) \ #define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
__pmd_offset(address))
#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0)) #define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0))
#define pte_same(a, b) ((a).pte == (b).pte) #define pte_same(a, b) ((a).pte == (b).pte)
#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1)) #define PMD_MASK (~(PMD_SIZE-1))
...@@ -202,7 +198,7 @@ static inline void set_pml4(pml4_t *dst, pml4_t val) ...@@ -202,7 +198,7 @@ static inline void set_pml4(pml4_t *dst, pml4_t val)
static inline unsigned long pgd_bad(pgd_t pgd) static inline unsigned long pgd_bad(pgd_t pgd)
{ {
unsigned long val = pgd_val(pgd); unsigned long val = pgd_val(pgd);
val &= ~PAGE_MASK; val &= ~PTE_MASK;
val &= ~(_PAGE_USER | _PAGE_DIRTY); val &= ~(_PAGE_USER | _PAGE_DIRTY);
return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
} }
...@@ -211,14 +207,18 @@ static inline unsigned long pgd_bad(pgd_t pgd) ...@@ -211,14 +207,18 @@ static inline unsigned long pgd_bad(pgd_t pgd)
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) #define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
right? */ right? */
#define pte_page(x) (mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT)))) #define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
pte_t pte;
pte_val(pte) = (page_nr << PAGE_SHIFT) | pgprot_val(pgprot);
return pte;
}
/* /*
* The following only work if pte_present() is true. * The following only work if pte_present() is true.
...@@ -256,7 +256,7 @@ static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, ptep); ...@@ -256,7 +256,7 @@ static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, ptep);
* Level 4 access. * Level 4 access.
* Never use these in the common code. * Never use these in the common code.
*/ */
#define pml4_page(pml4) ((unsigned long) __va(pml4_val(pml4) & PAGE_MASK)) #define pml4_page(pml4) ((unsigned long) __va(pml4_val(pml4) & PTE_MASK))
#define pml4_index(address) ((address >> PML4_SHIFT) & (PTRS_PER_PML4-1)) #define pml4_index(address) ((address >> PML4_SHIFT) & (PTRS_PER_PML4-1))
#define pml4_offset_k(address) (init_level4_pgt + pml4_index(address)) #define pml4_offset_k(address) (init_level4_pgt + pml4_index(address))
#define mk_kernel_pml4(address) ((pml4_t){ (address) | _KERNPG_TABLE }) #define mk_kernel_pml4(address) ((pml4_t){ (address) | _KERNPG_TABLE })
...@@ -277,41 +277,42 @@ static inline pgd_t *pgd_offset_k(unsigned long address) ...@@ -277,41 +277,42 @@ static inline pgd_t *pgd_offset_k(unsigned long address)
pml4_t pml4; pml4_t pml4;
pml4 = init_level4_pgt[pml4_index(address)]; pml4 = init_level4_pgt[pml4_index(address)];
return __pgd_offset_k(__va(pml4_val(pml4) & PAGE_MASK), address); return __pgd_offset_k(__va(pml4_val(pml4) & PTE_MASK), address);
} }
#define __pgd_offset(address) pgd_index(address) #define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/* PMD - Level 2 access */ /* PMD - Level 2 access */
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd) (mem_map + (pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & PTE_MASK)>>PAGE_SHIFT))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define __pmd_offset(address) \ #define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) __pmd_offset(address))
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
/* PTE - Level 1 access. */ /* PTE - Level 1 access. */
#define mk_pte(page,pgprot) \ /* page, protection -> pte */
({ \ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
pte_t __pte; \
\
set_pte(&__pte, __pte(((page)-mem_map) * \
(unsigned long long)PAGE_SIZE + pgprot_val(pgprot))); \
__pte; \
})
/* This takes a physical page address that is used by the remapping functions */ /* physical address -> PTE */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{ {
pte_t __pte; pte_t pte;
set_pte(&__pte, __pte(physpage + pgprot_val(pgprot))); pte_val(pte) = physpage | pgprot_val(pgprot);
return __pte; return pte;
} }
/* Change flags of a PTE */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ {
set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
return pte; return pte;
} }
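Together these helpers build and edit PTEs without open-coded shifting. A sketch, assuming the usual PAGE_KERNEL* protection constants:

	static void make_readonly(pte_t *ptep, struct page *page)
	{
		pte_t pte = mk_pte(page, PAGE_KERNEL);	/* page + protection */
		pte = pte_modify(pte, PAGE_KERNEL_RO);	/* swap flags, keep pfn */
		set_pte(ptep, pte);
	}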
......
...@@ -84,17 +84,6 @@ extern struct cpuinfo_x86 cpu_data[]; ...@@ -84,17 +84,6 @@ extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data boot_cpu_data #define current_cpu_data boot_cpu_data
#endif #endif
#define cpu_has_pge 1
#define cpu_has_pse 1
#define cpu_has_pae 1
#define cpu_has_tsc 1
#define cpu_has_de 1
#define cpu_has_vme 1
#define cpu_has_fxsr 1
#define cpu_has_mmx 1
#define cpu_has_xmm 1
#define cpu_has_apic 1
extern char ignore_irq13; extern char ignore_irq13;
extern void identify_cpu(struct cpuinfo_x86 *); extern void identify_cpu(struct cpuinfo_x86 *);
...@@ -371,8 +360,6 @@ extern void release_thread(struct task_struct *); ...@@ -371,8 +360,6 @@ extern void release_thread(struct task_struct *);
*/ */
extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Copy and release all segment info associated with a VM */
extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
extern void release_segments(struct mm_struct * mm); extern void release_segments(struct mm_struct * mm);
/* /*
......
...@@ -85,6 +85,7 @@ struct pt_regs { ...@@ -85,6 +85,7 @@ struct pt_regs {
#define user_mode(regs) (!!((regs)->cs & 3)) #define user_mode(regs) (!!((regs)->cs & 3))
#define instruction_pointer(regs) ((regs)->rip) #define instruction_pointer(regs) ((regs)->rip)
extern void show_regs(struct pt_regs *); extern void show_regs(struct pt_regs *);
void signal_fault(struct pt_regs *regs, void *frame, char *where);
enum { enum {
EF_CF = 0x00000001, EF_CF = 0x00000001,
......
...@@ -146,18 +146,18 @@ static inline void __up_read(struct rw_semaphore *sem) ...@@ -146,18 +146,18 @@ static inline void __up_read(struct rw_semaphore *sem)
__s32 tmp = -RWSEM_ACTIVE_READ_BIAS; __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
__asm__ __volatile__( __asm__ __volatile__(
"# beginning __up_read\n\t" "# beginning __up_read\n\t"
LOCK_PREFIX " xaddl %%edx,(%%rdi)\n\t" /* subtracts 1, returns the old value */ LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* subtracts 1, returns the old value */
" js 2f\n\t" /* jump if the lock is being waited upon */ " js 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t" "1:\n\t"
LOCK_SECTION_START("") LOCK_SECTION_START("")
"2:\n\t" "2:\n\t"
" decw %%dx\n\t" /* do nothing if still outstanding active readers */ " decw %w[tmp]\n\t" /* do nothing if still outstanding active readers */
" jnz 1b\n\t" " jnz 1b\n\t"
" call rwsem_wake_thunk\n\t" " call rwsem_wake_thunk\n\t"
" jmp 1b\n" " jmp 1b\n"
LOCK_SECTION_END LOCK_SECTION_END
"# ending __up_read\n" "# ending __up_read\n"
: "+m"(sem->count), "+d"(tmp) : "+m"(sem->count), "+r" (tmp)
: "D"(sem) : "D"(sem)
: "memory", "cc"); : "memory", "cc");
} }
...@@ -167,23 +167,24 @@ LOCK_PREFIX " xaddl %%edx,(%%rdi)\n\t" /* subtracts 1, returns the old val ...@@ -167,23 +167,24 @@ LOCK_PREFIX " xaddl %%edx,(%%rdi)\n\t" /* subtracts 1, returns the old val
*/ */
static inline void __up_write(struct rw_semaphore *sem) static inline void __up_write(struct rw_semaphore *sem)
{ {
unsigned tmp;
__asm__ __volatile__( __asm__ __volatile__(
"# beginning __up_write\n\t" "# beginning __up_write\n\t"
" movl %2,%%edx\n\t" " movl %2,%[tmp]\n\t"
LOCK_PREFIX " xaddl %%edx,(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
" jnz 2f\n\t" /* jump if the lock is being waited upon */ " jnz 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t" "1:\n\t"
LOCK_SECTION_START("") LOCK_SECTION_START("")
"2:\n\t" "2:\n\t"
" decw %%dx\n\t" /* did the active count reduce to 0? */ " decw %w[tmp]\n\t" /* did the active count reduce to 0? */
" jnz 1b\n\t" /* jump back if not */ " jnz 1b\n\t" /* jump back if not */
" call rwsem_wake_thunk\n\t" " call rwsem_wake_thunk\n\t"
" jmp 1b\n" " jmp 1b\n"
LOCK_SECTION_END LOCK_SECTION_END
"# ending __up_write\n" "# ending __up_write\n"
: "+m"(sem->count) : "+m"(sem->count), [tmp] "r" (tmp)
: "D"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS) : "D"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc", "rdx"); : "memory", "cc");
} }
/* /*
......
#ifndef _ASMx8664_SIGNAL_H #ifndef _ASMx8664_SIGNAL_H
#define _ASMx8664_SIGNAL_H #define _ASMx8664_SIGNAL_H
#ifndef __ASSEMBLY__
#include <linux/types.h> #include <linux/types.h>
#include <linux/linkage.h> #include <linux/linkage.h>
...@@ -33,6 +34,7 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); ...@@ -33,6 +34,7 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
typedef unsigned long sigset_t; typedef unsigned long sigset_t;
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif
#define SIGHUP 1 #define SIGHUP 1
#define SIGINT 2 #define SIGINT 2
...@@ -131,6 +133,7 @@ typedef unsigned long sigset_t; ...@@ -131,6 +133,7 @@ typedef unsigned long sigset_t;
#define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */
#define SIG_SETMASK 2 /* for setting the signal mask */ #define SIG_SETMASK 2 /* for setting the signal mask */
#ifndef __ASSEMBLY__
/* Type of a signal handler. */ /* Type of a signal handler. */
typedef void (*__sighandler_t)(int); typedef void (*__sighandler_t)(int);
...@@ -200,6 +203,7 @@ extern __inline__ int sigfindinword(unsigned long word) ...@@ -200,6 +203,7 @@ extern __inline__ int sigfindinword(unsigned long word)
return word; return word;
} }
#endif #endif
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
...@@ -26,7 +26,8 @@ ...@@ -26,7 +26,8 @@
"2: call do_softirq_thunk;" \ "2: call do_softirq_thunk;" \
"jmp 1b;" \ "jmp 1b;" \
".previous" \ ".previous" \
:: "i" (pda___softirq_pending), "i" (pda___local_bh_count) : \ :: "i" (pda_offset(__softirq_pending)), \
"i" (pda_offset(__local_bh_count)) : \
"memory"); \ "memory"); \
} while (0) } while (0)
#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while(0) #define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while(0)
......
...@@ -2,36 +2,76 @@ ...@@ -2,36 +2,76 @@
#define _X86_64_STRING_H_ #define _X86_64_STRING_H_
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/config.h>
#define struct_cpy(x,y) (*(x)=*(y)) #define struct_cpy(x,y) (*(x)=*(y))
#define __HAVE_ARCH_MEMCMP /* Written 2002 by Andi Kleen */
#define __HAVE_ARCH_STRLEN
#define memset __builtin_memset /* Only used for special circumstances. Stolen from i386/string.h */
#define memcpy __builtin_memcpy static inline void * __inline_memcpy(void * to, const void * from, size_t n)
#define memcmp __builtin_memcmp
/* Work around "undefined reference to strlen" linker errors. */
/* #define strlen __builtin_strlen */
#define __HAVE_ARCH_STRLEN
static inline size_t strlen(const char * s)
{ {
int d0; unsigned long d0, d1, d2;
register int __res;
__asm__ __volatile__( __asm__ __volatile__(
"repne\n\t" "rep ; movsl\n\t"
"scasb\n\t" "testb $2,%b4\n\t"
"notl %0\n\t" "je 1f\n\t"
"decl %0" "movsw\n"
:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff)); "1:\ttestb $1,%b4\n\t"
return __res; "je 2f\n\t"
"movsb\n"
"2:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
: "memory");
return (to);
} }
/* Even with __builtin_ the compiler may decide to use the out-of-line
function. */
#define __HAVE_ARCH_MEMCPY 1
extern void *__memcpy(void *to, const void *from, size_t len);
#define memcpy(dst,src,len) \
({ size_t __len = (len); \
void *__ret; \
if (__builtin_constant_p(len) && __len >= 64) \
__ret = __memcpy((dst),(src),__len); \
else \
__ret = __builtin_memcpy((dst),(src),__len); \
__ret; })
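The intended dispatch, illustrated (sketch):

	static void copy_examples(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, 16);	/* small constant: inlined builtin */
		memcpy(dst, src, 4096);	/* constant >= 64: out-of-line __memcpy() */
		memcpy(dst, src, n);	/* variable: left to __builtin_memcpy */
	}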
#if 0
#define __HAVE_ARCH_MEMSET
extern void *__memset(void *mem, int val, size_t len);
#define memset(dst,val,len) \
({ size_t __len = (len); \
void *__ret; \
if (__builtin_constant_p(len) && __len >= 64) \
__ret = __memset((dst),(val),__len); \
else \
__ret = __builtin_memset((dst),(val),__len); \
__ret; })
#endif
#define __HAVE_ARCH_MEMMOVE
void * memmove(void * dest,const void *src,size_t count);
/* Use C out of line version for memcmp */
#define memcmp __builtin_memcmp
int memcmp(const void * cs,const void * ct,size_t count);
/* Out-of-line string functions always use the C versions. */
#define strlen __builtin_strlen
size_t strlen(const char * s);
#define strcpy __builtin_strcpy
char * strcpy(char * dest,const char *src);
#define strcat __builtin_strcat
char * strcat(char * dest, const char * src);
extern char *strstr(const char *cs, const char *ct); #define strcmp __builtin_strcmp
int strcmp(const char * cs,const char * ct);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -51,6 +51,8 @@ struct thread_info { ...@@ -51,6 +51,8 @@ struct thread_info {
/* how to get the thread information struct from C */ /* how to get the thread information struct from C */
#define THREAD_SIZE (2*PAGE_SIZE)
static inline struct thread_info *current_thread_info(void) static inline struct thread_info *current_thread_info(void)
{ {
struct thread_info *ti; struct thread_info *ti;
...@@ -66,7 +68,6 @@ static inline struct thread_info *stack_thread_info(void) ...@@ -66,7 +68,6 @@ static inline struct thread_info *stack_thread_info(void)
} }
/* thread information allocation */ /* thread information allocation */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) #define alloc_thread_info() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task) #define get_thread_info(ti) get_task_struct((ti)->task)
...@@ -75,6 +76,7 @@ static inline struct thread_info *stack_thread_info(void) ...@@ -75,6 +76,7 @@ static inline struct thread_info *stack_thread_info(void)
#else /* !__ASSEMBLY__ */ #else /* !__ASSEMBLY__ */
/* how to get the thread information struct from ASM */ /* how to get the thread information struct from ASM */
/* only works on the process stack. otherwise get it via the PDA. */
#define GET_THREAD_INFO(reg) \ #define GET_THREAD_INFO(reg) \
movq $-8192, reg; \ movq $-8192, reg; \
andq %rsp, reg andq %rsp, reg
......
...@@ -48,4 +48,6 @@ static inline cycles_t get_cycles (void) ...@@ -48,4 +48,6 @@ static inline cycles_t get_cycles (void)
extern unsigned int cpu_khz; extern unsigned int cpu_khz;
#define ARCH_HAS_JIFFIES_64
#endif #endif
#ifndef TLB_H
#define TLB_H 1
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h> #include <asm-generic/tlb.h>
#endif
...@@ -17,8 +17,8 @@ typedef unsigned short __u16; ...@@ -17,8 +17,8 @@ typedef unsigned short __u16;
typedef __signed__ int __s32; typedef __signed__ int __s32;
typedef unsigned int __u32; typedef unsigned int __u32;
typedef __signed__ long __s64; typedef __signed__ long long __s64;
typedef unsigned long __u64; typedef unsigned long long __u64;
/* /*
* These aren't exported outside the kernel to avoid name space clashes * These aren't exported outside the kernel to avoid name space clashes
...@@ -34,8 +34,8 @@ typedef unsigned short u16; ...@@ -34,8 +34,8 @@ typedef unsigned short u16;
typedef signed int s32; typedef signed int s32;
typedef unsigned int u32; typedef unsigned int u32;
typedef signed long s64; typedef signed long long s64;
typedef unsigned long u64; typedef unsigned long long u64;
#define BITS_PER_LONG 64 #define BITS_PER_LONG 64
......
...@@ -230,138 +230,19 @@ do { \ ...@@ -230,138 +230,19 @@ do { \
/* /*
* Copy To/From Userspace * Copy To/From Userspace
*
* This relies on an optimized common worker function.
*
* Could do special inline versions for small constant copies, but avoid this
* for now. It's not clear it is worth it.
*/ */
/* Generic arbitrary sized copy. */ extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);
/* Could do 8byte accesses, instead of 4bytes. */
/* Generic arbitrary sized copy. */
#define __copy_user(to,from,size) \
do { \
long __d0, __d1; \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movq %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: lea 0(%3,%0,4),%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
" .quad 0b,3b\n" \
" .quad 1b,2b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
} while (0)
#define __copy_user_zeroing(to,from,size) \
do { \
long __d0, __d1; \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movq %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: lea 0(%3,%0,4),%0\n" \
"4: pushq %0\n" \
" pushq %%rax\n" \
" xorq %%rax,%%rax\n" \
" rep; stosb\n" \
" popq %%rax\n" \
" popq %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
" .quad 0b,3b\n" \
" .quad 1b,4b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
} while (0)
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
__copy_user_zeroing(to,from,n);
return n;
}
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
prefetch(from);
__copy_user(to,from,n);
return n;
}
unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
__copy_user(to,from,n);
return n;
}
static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to,from,n);
else
memset(to, 0, n);
return n;
}
static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
__copy_user(to,from,n);
return n;
}
static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
__copy_user_zeroing(to,from,n);
return n;
}
#define copy_to_user(to,from,n) \ extern unsigned long copy_to_user(void *to, const void *from, unsigned len);
(__builtin_constant_p(n) ? \ extern unsigned long copy_from_user(void *to, const void *from, unsigned len);
__constant_copy_to_user((to),(from),(n)) : \ #define __copy_to_user copy_user_generic
__generic_copy_to_user((to),(from),(n))) #define __copy_from_user copy_user_generic
#define copy_from_user(to,from,n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user((to),(from),(n)) : \
__generic_copy_from_user((to),(from),(n)))
#define __copy_to_user(to,from,n) \
(__builtin_constant_p(n) ? \
__constant_copy_to_user_nocheck((to),(from),(n)) : \
__generic_copy_to_user_nocheck((to),(from),(n)))
#define __copy_from_user(to,from,n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user_nocheck((to),(from),(n)) : \
__generic_copy_from_user_nocheck((to),(from),(n)))
long strncpy_from_user(char *dst, const char *src, long count); long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count); long __strncpy_from_user(char *dst, const char *src, long count);
......
...@@ -84,6 +84,7 @@ struct user{ ...@@ -84,6 +84,7 @@ struct user{
/* ptrace does not yet supply these. Someday.... */ /* ptrace does not yet supply these. Someday.... */
int u_fpvalid; /* True if math co-processor being used. */ int u_fpvalid; /* True if math co-processor being used. */
/* for this mess. Not yet used. */ /* for this mess. Not yet used. */
int pad0;
struct user_i387_struct i387; /* Math Co-processor registers. */ struct user_i387_struct i387; /* Math Co-processor registers. */
/* The rest of this junk is to help gdb figure out what goes where */ /* The rest of this junk is to help gdb figure out what goes where */
unsigned long int u_tsize; /* Text segment size (pages). */ unsigned long int u_tsize; /* Text segment size (pages). */
...@@ -96,12 +97,15 @@ struct user{ ...@@ -96,12 +97,15 @@ struct user{
esp register. */ esp register. */
long int signal; /* Signal that caused the core dump. */ long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */ int reserved; /* No longer used */
int pad1;
struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
/* the registers. */ /* the registers. */
struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* To uniquely identify a core file */ unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */ char u_comm[32]; /* User command that was responsible */
unsigned long u_debugreg[8]; unsigned long u_debugreg[8];
unsigned long error_code; /* CPU error code or 0 */
unsigned long fault_address; /* CR3 or 0 */
}; };
#define NBPG PAGE_SIZE #define NBPG PAGE_SIZE
#define UPAGES 1 #define UPAGES 1
......
/* /*
* include/asm-i386/xor.h * include/asm-x86_64/xor.h
* *
* Optimized RAID-5 checksumming functions for MMX and SSE. * Optimized RAID-5 checksumming functions for MMX and SSE.
* *
...@@ -13,525 +13,32 @@ ...@@ -13,525 +13,32 @@
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/ */
/* /*
* High-speed RAID5 checksumming functions utilizing MMX instructions. * Cache avoiding checksumming functions utilizing KNI instructions
* Copyright (C) 1998 Ingo Molnar. * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
*/ */
#define FPU_SAVE \ /*
do { \ * Based on
if (!test_thread_flag(TIF_USEDFPU)) \ * High-speed RAID5 checksumming functions utilizing SSE instructions.
__asm__ __volatile__ (" clts;\n"); \ * Copyright (C) 1998 Ingo Molnar.
__asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \ */
} while (0)
#define FPU_RESTORE \
do { \
__asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \
if (!test_thread_flag(TIF_USEDFPU)) \
stts(); \
} while (0)
#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
static void
xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
LD(i,0) \
LD(i+1,1) \
LD(i+2,2) \
LD(i+3,3) \
XO1(i,0) \
ST(i,0) \
XO1(i+1,1) \
ST(i+1,1) \
XO1(i+2,2) \
ST(i+2,2) \
XO1(i+3,3) \
ST(i+3,3)
" .align 32 ;\n"
" 1: ;\n"
BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)
" addq $128, %1 ;\n"
" addq $128, %2 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "r" (lines),
"r" (p1), "r" (p2)
: "memory");
FPU_RESTORE;
}
static void
xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
LD(i,0) \
LD(i+1,1) \
LD(i+2,2) \
LD(i+3,3) \
XO1(i,0) \
XO1(i+1,1) \
XO1(i+2,2) \
XO1(i+3,3) \
XO2(i,0) \
ST(i,0) \
XO2(i+1,1) \
ST(i+1,1) \
XO2(i+2,2) \
ST(i+2,2) \
XO2(i+3,3) \
ST(i+3,3)
" .align 32 ;\n"
" 1: ;\n"
BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)
" addq $128, %1 ;\n"
" addq $128, %2 ;\n"
" addq $128, %3 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "r" (lines),
"r" (p1), "r" (p2), "r" (p3)
: "memory");
FPU_RESTORE;
}
static void
xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
LD(i,0) \
LD(i+1,1) \
LD(i+2,2) \
LD(i+3,3) \
XO1(i,0) \
XO1(i+1,1) \
XO1(i+2,2) \
XO1(i+3,3) \
XO2(i,0) \
XO2(i+1,1) \
XO2(i+2,2) \
XO2(i+3,3) \
XO3(i,0) \
ST(i,0) \
XO3(i+1,1) \
ST(i+1,1) \
XO3(i+2,2) \
ST(i+2,2) \
XO3(i+3,3) \
ST(i+3,3)
" .align 32 ;\n"
" 1: ;\n"
BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)
" addq $128, %1 ;\n"
" addq $128, %2 ;\n"
" addq $128, %3 ;\n"
" addq $128, %4 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "r" (lines),
"r" (p1), "r" (p2), "r" (p3), "r" (p4)
: "memory");
FPU_RESTORE;
}
static void
xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
#undef BLOCK
#define BLOCK(i) \
LD(i,0) \
LD(i+1,1) \
LD(i+2,2) \
LD(i+3,3) \
XO1(i,0) \
XO1(i+1,1) \
XO1(i+2,2) \
XO1(i+3,3) \
XO2(i,0) \
XO2(i+1,1) \
XO2(i+2,2) \
XO2(i+3,3) \
XO3(i,0) \
XO3(i+1,1) \
XO3(i+2,2) \
XO3(i+3,3) \
XO4(i,0) \
ST(i,0) \
XO4(i+1,1) \
ST(i+1,1) \
XO4(i+2,2) \
ST(i+2,2) \
XO4(i+3,3) \
ST(i+3,3)
" .align 32 ;\n"
" 1: ;\n"
BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)
" addq $128, %1 ;\n"
" addq $128, %2 ;\n"
" addq $128, %3 ;\n"
" addq $128, %4 ;\n"
" addq $128, %5 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "g" (lines),
"r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
: "memory");
FPU_RESTORE;
}
#undef LD
#undef XO1
#undef XO2
#undef XO3
#undef XO4
#undef ST
#undef BLOCK
static void
xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
" .align 32 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
" movq 8(%1), %%mm1 ;\n"
" pxor (%2), %%mm0 ;\n"
" movq 16(%1), %%mm2 ;\n"
" movq %%mm0, (%1) ;\n"
" pxor 8(%2), %%mm1 ;\n"
" movq 24(%1), %%mm3 ;\n"
" movq %%mm1, 8(%1) ;\n"
" pxor 16(%2), %%mm2 ;\n"
" movq 32(%1), %%mm4 ;\n"
" movq %%mm2, 16(%1) ;\n"
" pxor 24(%2), %%mm3 ;\n"
" movq 40(%1), %%mm5 ;\n"
" movq %%mm3, 24(%1) ;\n"
" pxor 32(%2), %%mm4 ;\n"
" movq 48(%1), %%mm6 ;\n"
" movq %%mm4, 32(%1) ;\n"
" pxor 40(%2), %%mm5 ;\n"
" movq 56(%1), %%mm7 ;\n"
" movq %%mm5, 40(%1) ;\n"
" pxor 48(%2), %%mm6 ;\n"
" pxor 56(%2), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
" addq $64, %1 ;\n"
" addq $64, %2 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "r" (lines),
"r" (p1), "r" (p2)
: "memory");
FPU_RESTORE;
}
static void
xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
" .align 32,0x90 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
" movq 8(%1), %%mm1 ;\n"
" pxor (%2), %%mm0 ;\n"
" movq 16(%1), %%mm2 ;\n"
" pxor 8(%2), %%mm1 ;\n"
" pxor (%3), %%mm0 ;\n"
" pxor 16(%2), %%mm2 ;\n"
" movq %%mm0, (%1) ;\n"
" pxor 8(%3), %%mm1 ;\n"
" pxor 16(%3), %%mm2 ;\n"
" movq 24(%1), %%mm3 ;\n"
" movq %%mm1, 8(%1) ;\n"
" movq 32(%1), %%mm4 ;\n"
" movq 40(%1), %%mm5 ;\n"
" pxor 24(%2), %%mm3 ;\n"
" movq %%mm2, 16(%1) ;\n"
" pxor 32(%2), %%mm4 ;\n"
" pxor 24(%3), %%mm3 ;\n"
" pxor 40(%2), %%mm5 ;\n"
" movq %%mm3, 24(%1) ;\n"
" pxor 32(%3), %%mm4 ;\n"
" pxor 40(%3), %%mm5 ;\n"
" movq 48(%1), %%mm6 ;\n"
" movq %%mm4, 32(%1) ;\n"
" movq 56(%1), %%mm7 ;\n"
" pxor 48(%2), %%mm6 ;\n"
" movq %%mm5, 40(%1) ;\n"
" pxor 56(%2), %%mm7 ;\n"
" pxor 48(%3), %%mm6 ;\n"
" pxor 56(%3), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
" addq $64, %1 ;\n"
" addq $64, %2 ;\n"
" addq $64, %3 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "r" (lines),
"r" (p1), "r" (p2), "r" (p3)
: "memory" );
FPU_RESTORE;
}
static void
xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
" .align 32,0x90 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
" movq 8(%1), %%mm1 ;\n"
" pxor (%2), %%mm0 ;\n"
" movq 16(%1), %%mm2 ;\n"
" pxor 8(%2), %%mm1 ;\n"
" pxor (%3), %%mm0 ;\n"
" pxor 16(%2), %%mm2 ;\n"
" pxor 8(%3), %%mm1 ;\n"
" pxor (%4), %%mm0 ;\n"
" movq 24(%1), %%mm3 ;\n"
" pxor 16(%3), %%mm2 ;\n"
" pxor 8(%4), %%mm1 ;\n"
" movq %%mm0, (%1) ;\n"
" movq 32(%1), %%mm4 ;\n"
" pxor 24(%2), %%mm3 ;\n"
" pxor 16(%4), %%mm2 ;\n"
" movq %%mm1, 8(%1) ;\n"
" movq 40(%1), %%mm5 ;\n"
" pxor 32(%2), %%mm4 ;\n"
" pxor 24(%3), %%mm3 ;\n"
" movq %%mm2, 16(%1) ;\n"
" pxor 40(%2), %%mm5 ;\n"
" pxor 32(%3), %%mm4 ;\n"
" pxor 24(%4), %%mm3 ;\n"
" movq %%mm3, 24(%1) ;\n"
" movq 56(%1), %%mm7 ;\n"
" movq 48(%1), %%mm6 ;\n"
" pxor 40(%3), %%mm5 ;\n"
" pxor 32(%4), %%mm4 ;\n"
" pxor 48(%2), %%mm6 ;\n"
" movq %%mm4, 32(%1) ;\n"
" pxor 56(%2), %%mm7 ;\n"
" pxor 40(%4), %%mm5 ;\n"
" pxor 48(%3), %%mm6 ;\n"
" pxor 56(%3), %%mm7 ;\n"
" movq %%mm5, 40(%1) ;\n"
" pxor 48(%4), %%mm6 ;\n"
" pxor 56(%4), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
" addq $64, %1 ;\n"
" addq $64, %2 ;\n"
" addq $64, %3 ;\n"
" addq $64, %4 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "r" (lines),
"r" (p1), "r" (p2), "r" (p3), "r" (p4)
: "memory");
FPU_RESTORE;
}
static void
xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
__asm__ __volatile__ (
" .align 32,0x90 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
" movq 8(%1), %%mm1 ;\n"
" pxor (%2), %%mm0 ;\n"
" pxor 8(%2), %%mm1 ;\n"
" movq 16(%1), %%mm2 ;\n"
" pxor (%3), %%mm0 ;\n"
" pxor 8(%3), %%mm1 ;\n"
" pxor 16(%2), %%mm2 ;\n"
" pxor (%4), %%mm0 ;\n"
" pxor 8(%4), %%mm1 ;\n"
" pxor 16(%3), %%mm2 ;\n"
" movq 24(%1), %%mm3 ;\n"
" pxor (%5), %%mm0 ;\n"
" pxor 8(%5), %%mm1 ;\n"
" movq %%mm0, (%1) ;\n"
" pxor 16(%4), %%mm2 ;\n"
" pxor 24(%2), %%mm3 ;\n"
" movq %%mm1, 8(%1) ;\n"
" pxor 16(%5), %%mm2 ;\n"
" pxor 24(%3), %%mm3 ;\n"
" movq 32(%1), %%mm4 ;\n"
" movq %%mm2, 16(%1) ;\n"
" pxor 24(%4), %%mm3 ;\n"
" pxor 32(%2), %%mm4 ;\n"
" movq 40(%1), %%mm5 ;\n"
" pxor 24(%5), %%mm3 ;\n"
" pxor 32(%3), %%mm4 ;\n"
" pxor 40(%2), %%mm5 ;\n"
" movq %%mm3, 24(%1) ;\n"
" pxor 32(%4), %%mm4 ;\n"
" pxor 40(%3), %%mm5 ;\n"
" movq 48(%1), %%mm6 ;\n"
" movq 56(%1), %%mm7 ;\n"
" pxor 32(%5), %%mm4 ;\n"
" pxor 40(%4), %%mm5 ;\n"
" pxor 48(%2), %%mm6 ;\n"
" pxor 56(%2), %%mm7 ;\n"
" movq %%mm4, 32(%1) ;\n"
" pxor 48(%3), %%mm6 ;\n"
" pxor 56(%3), %%mm7 ;\n"
" pxor 40(%5), %%mm5 ;\n"
" pxor 48(%4), %%mm6 ;\n"
" pxor 56(%4), %%mm7 ;\n"
" movq %%mm5, 40(%1) ;\n"
" pxor 48(%5), %%mm6 ;\n"
" pxor 56(%5), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
" addq $64, %1 ;\n"
" addq $64, %2 ;\n"
" addq $64, %3 ;\n"
" addq $64, %4 ;\n"
" addq $64, %5 ;\n"
" decq %0 ;\n"
" jnz 1b ;\n"
:
: "g" (lines),
"r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
: "memory");
FPU_RESTORE;
}
static struct xor_block_template xor_block_pII_mmx = {
name: "pII_mmx",
do_2: xor_pII_mmx_2,
do_3: xor_pII_mmx_3,
do_4: xor_pII_mmx_4,
do_5: xor_pII_mmx_5,
};
static struct xor_block_template xor_block_p5_mmx = {
name: "p5_mmx",
do_2: xor_p5_mmx_2,
do_3: xor_p5_mmx_3,
do_4: xor_p5_mmx_4,
do_5: xor_p5_mmx_5,
};
#undef FPU_SAVE
#undef FPU_RESTORE
/* /*
* Cache avoiding checksumming functions utilizing KNI instructions * x86-64 changes / gcc fixes from Andi Kleen.
* Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) * Copyright 2002 Andi Kleen, SuSE Labs.
*
* This hasn't been optimized for the hammer yet, but there are likely
* no advantages to be gotten from x86-64 here anyways.
*/ */
typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
/* Doesn't use gcc to save the XMM registers, because there is no easy way to
tell it to do a clts before the register saving. */
#define XMMS_SAVE \ #define XMMS_SAVE \
__asm__ __volatile__ ( \ asm volatile ( \
"movq %%cr0,%0 ;\n\t" \ "movq %%cr0,%0 ;\n\t" \
"clts ;\n\t" \ "clts ;\n\t" \
"movups %%xmm0,(%1) ;\n\t" \ "movups %%xmm0,(%1) ;\n\t" \
...@@ -543,7 +50,7 @@ static struct xor_block_template xor_block_p5_mmx = { ...@@ -543,7 +50,7 @@ static struct xor_block_template xor_block_p5_mmx = {
: "memory") : "memory")
#define XMMS_RESTORE \ #define XMMS_RESTORE \
__asm__ __volatile__ ( \ asm volatile ( \
"sfence ;\n\t" \ "sfence ;\n\t" \
"movups (%1),%%xmm0 ;\n\t" \ "movups (%1),%%xmm0 ;\n\t" \
"movups 0x10(%1),%%xmm1 ;\n\t" \ "movups 0x10(%1),%%xmm1 ;\n\t" \
...@@ -556,31 +63,31 @@ static struct xor_block_template xor_block_p5_mmx = { ...@@ -556,31 +63,31 @@ static struct xor_block_template xor_block_p5_mmx = {
#define OFFS(x) "16*("#x")" #define OFFS(x) "16*("#x")"
#define PF_OFFS(x) "256+16*("#x")" #define PF_OFFS(x) "256+16*("#x")"
#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" #define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" #define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" #define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" #define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" #define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" #define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
static void static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{ {
unsigned long lines = bytes >> 8; unsigned int lines = bytes >> 8;
char xmm_save[16*4];
unsigned long cr0; unsigned long cr0;
xmm_store_t xmm_save[4];
XMMS_SAVE; XMMS_SAVE;
__asm__ __volatile__ ( asm volatile (
#undef BLOCK #undef BLOCK
#define BLOCK(i) \ #define BLOCK(i) \
LD(i,0) \ LD(i,0) \
...@@ -612,13 +119,11 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) ...@@ -612,13 +119,11 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
BLOCK(8) BLOCK(8)
BLOCK(12) BLOCK(12)
" addq $256, %1 ;\n" " addq %[inc], %[p1] ;\n"
" addq $256, %2 ;\n" " addq %[inc], %[p2] ;\n"
" decq %0 ;\n" " decl %[cnt] ; jnz 1b"
" jnz 1b ;\n" : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
: : [inc] "r" (256UL)
: "r" (lines),
"r" (p1), "r" (p2)
: "memory"); : "memory");
XMMS_RESTORE; XMMS_RESTORE;
...@@ -628,8 +133,8 @@ static void ...@@ -628,8 +133,8 @@ static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3) unsigned long *p3)
{ {
unsigned long lines = bytes >> 8; unsigned int lines = bytes >> 8;
char xmm_save[16*4]; xmm_store_t xmm_save[4];
unsigned long cr0; unsigned long cr0;
XMMS_SAVE; XMMS_SAVE;
...@@ -672,16 +177,14 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -672,16 +177,14 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
BLOCK(8) BLOCK(8)
BLOCK(12) BLOCK(12)
" addq $256, %1 ;\n" " addq %[inc], %[p1] ;\n"
" addq $256, %2 ;\n" " addq %[inc], %[p2] ;\n"
" addq $256, %3 ;\n" " addq %[inc], %[p3] ;\n"
" decq %0 ;\n" " decl %[cnt] ; jnz 1b"
" jnz 1b ;\n" : [cnt] "+r" (lines),
: [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
: "r" (lines), : [inc] "r" (256UL)
"r" (p1), "r"(p2), "r"(p3) : "memory");
: "memory" );
XMMS_RESTORE; XMMS_RESTORE;
} }
...@@ -689,8 +192,8 @@ static void ...@@ -689,8 +192,8 @@ static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4) unsigned long *p3, unsigned long *p4)
{ {
unsigned long lines = bytes >> 8; unsigned int lines = bytes >> 8;
char xmm_save[16*4]; xmm_store_t xmm_save[4];
unsigned long cr0; unsigned long cr0;
XMMS_SAVE; XMMS_SAVE;
...@@ -739,15 +242,14 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -739,15 +242,14 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
BLOCK(8) BLOCK(8)
BLOCK(12) BLOCK(12)
" addq $256, %1 ;\n" " addq %[inc], %[p1] ;\n"
" addq $256, %2 ;\n" " addq %[inc], %[p2] ;\n"
" addq $256, %3 ;\n" " addq %[inc], %[p3] ;\n"
" addq $256, %4 ;\n" " addq %[inc], %[p4] ;\n"
" decq %0 ;\n" " decl %[cnt] ; jnz 1b"
" jnz 1b ;\n" : [cnt] "+c" (lines),
: [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
: "r" (lines), : [inc] "r" (256UL)
"r" (p1), "r" (p2), "r" (p3), "r" (p4)
: "memory" ); : "memory" );
XMMS_RESTORE; XMMS_RESTORE;
...@@ -757,8 +259,8 @@ static void ...@@ -757,8 +259,8 @@ static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5) unsigned long *p3, unsigned long *p4, unsigned long *p5)
{ {
unsigned long lines = bytes >> 8; unsigned int lines = bytes >> 8;
char xmm_save[16*4]; xmm_store_t xmm_save[4];
unsigned long cr0; unsigned long cr0;
XMMS_SAVE; XMMS_SAVE;
...@@ -813,47 +315,36 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -813,47 +315,36 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
BLOCK(8) BLOCK(8)
BLOCK(12) BLOCK(12)
" addq $256, %1 ;\n" " addq %[inc], %[p1] ;\n"
" addq $256, %2 ;\n" " addq %[inc], %[p2] ;\n"
" addq $256, %3 ;\n" " addq %[inc], %[p3] ;\n"
" addq $256, %4 ;\n" " addq %[inc], %[p4] ;\n"
" addq $256, %5 ;\n" " addq %[inc], %[p5] ;\n"
" decq %0 ;\n" " decl %[cnt] ; jnz 1b"
" jnz 1b ;\n" : [cnt] "+c" (lines),
: [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
: "r" (lines), [p5] "+r" (p5)
"r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) : [inc] "r" (256UL)
: "memory"); : "memory");
XMMS_RESTORE; XMMS_RESTORE;
} }
static struct xor_block_template xor_block_pIII_sse = { static struct xor_block_template xor_block_sse = {
name: "pIII_sse", name: "generic_sse",
do_2: xor_sse_2, do_2: xor_sse_2,
do_3: xor_sse_3, do_3: xor_sse_3,
do_4: xor_sse_4, do_4: xor_sse_4,
do_5: xor_sse_5, do_5: xor_sse_5,
}; };
/* Also try the generic routines. */
#include <asm-generic/xor.h>
#undef XOR_TRY_TEMPLATES #undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \ #define XOR_TRY_TEMPLATES \
do { \ do { \
xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_sse); \
xor_speed(&xor_block_32regs); \
if (cpu_has_xmm) \
xor_speed(&xor_block_pIII_sse); \
if (md_cpu_has_mmx()) { \
xor_speed(&xor_block_pII_mmx); \
xor_speed(&xor_block_p5_mmx); \
} \
} while (0) } while (0)
/* We force the use of the SSE xor block because it can write around L2. /* We force the use of the SSE xor block because it can write around L2.
We may also be able to load into the L1 only depending on how the cpu We may also be able to load into the L1 only depending on how the cpu
deals with a load to a line that is being prefetched. */ deals with a load to a line that is being prefetched. */
#define XOR_SELECT_TEMPLATE(FASTEST) \ #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment