Commit b10c4974 authored by Steve French

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/repos/c/cifs/linux-2.5cifs
parents ed426fea f0fdf5f8
......@@ -201,7 +201,7 @@ S: Maintained
AGPGART DRIVER
P: Dave Jones
M: davej@codemonkey.org.uk
W: http://www.codemonkey.org.uk/agp/
W: http://www.codemonkey.org.uk/projects/agp/
S: Maintained
AHA152X SCSI DRIVER
......
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 5
EXTRAVERSION =-rc2
EXTRAVERSION =-rc3
NAME=Feisty Dunnart
# *DOCUMENTATION*
......
......@@ -601,12 +601,8 @@ source "arch/arm/oprofile/Kconfig"
source "drivers/video/Kconfig"
if ARCH_ACORN || ARCH_CLPS7500 || ARCH_TBOX || ARCH_SHARK || ARCH_SA1100 || PCI
source "sound/Kconfig"
endif
source "drivers/misc/Kconfig"
source "drivers/usb/Kconfig"
......
......@@ -76,34 +76,27 @@ static struct vm_region consistent_head = {
.vm_end = CONSISTENT_END,
};
#if 0
static void vm_region_dump(struct vm_region *head, char *fn)
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, int gfp)
{
struct vm_region *c;
unsigned long addr = head->vm_start, end = head->vm_end - size;
unsigned long flags;
struct vm_region *c, *new;
printk("Consistent Allocation Map (%s):\n", fn);
list_for_each_entry(c, &head->vm_list, vm_list) {
printk(" %p: %08lx - %08lx (0x%08x)\n", c,
c->vm_start, c->vm_end, c->vm_end - c->vm_start);
}
}
#else
#define vm_region_dump(head,fn) do { } while(0)
#endif
new = kmalloc(sizeof(struct vm_region), gfp);
if (!new)
goto out;
static int vm_region_alloc(struct vm_region *head, struct vm_region *new, size_t size)
{
unsigned long addr = head->vm_start, end = head->vm_end - size;
struct vm_region *c;
spin_lock_irqsave(&consistent_lock, flags);
list_for_each_entry(c, &head->vm_list, vm_list) {
if ((addr + size) < addr)
goto out;
goto nospc;
if ((addr + size) <= c->vm_start)
goto found;
addr = c->vm_end;
if (addr > end)
goto out;
goto nospc;
}
found:
......@@ -114,10 +107,14 @@ static int vm_region_alloc(struct vm_region *head, struct vm_region *new, size_t
new->vm_start = addr;
new->vm_end = addr + size;
return 0;
spin_unlock_irqrestore(&consistent_lock, flags);
return new;
nospc:
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(new);
out:
return -ENOMEM;
return NULL;
}
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
......@@ -133,28 +130,46 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
return c;
}
/*
* This allocates one page of cache-coherent memory space and returns
* both the virtual and a "dma" address to that space.
*/
void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
unsigned long cache_flags)
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
pgprot_t prot)
{
struct page *page;
struct vm_region *c;
unsigned long order, flags;
void *ret = NULL;
int res;
unsigned long order;
u64 mask = 0x00ffffff, limit; /* ISA default */
if (!consistent_pte) {
printk(KERN_ERR "consistent_alloc: not initialised\n");
printk(KERN_ERR "%s: not initialised\n", __func__);
dump_stack();
return NULL;
}
if (dev) {
mask = dev->coherent_dma_mask;
if (mask == 0) {
dev_warn(dev, "coherent DMA mask is unset\n");
return NULL;
}
}
size = PAGE_ALIGN(size);
limit = (mask + 1) & ~mask;
if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
size, mask);
return NULL;
}
order = get_order(size);
if (mask != 0xffffffff)
gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
goto no_page;
......@@ -165,36 +180,18 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
*/
{
unsigned long kaddr = (unsigned long)page_address(page);
dmac_inv_range(kaddr, kaddr + size);
memset(page_address(page), 0, size);
dmac_flush_range(kaddr, kaddr + size);
}
/*
* Our housekeeping doesn't need to come from DMA,
* but it must not come from highmem.
*/
c = kmalloc(sizeof(struct vm_region),
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (!c)
goto no_remap;
/*
* Attempt to allocate a virtual address in the
* consistent mapping region.
* Allocate a virtual address in the consistent mapping region.
*/
spin_lock_irqsave(&consistent_lock, flags);
vm_region_dump(&consistent_head, "before alloc");
res = vm_region_alloc(&consistent_head, c, size);
vm_region_dump(&consistent_head, "after alloc");
spin_unlock_irqrestore(&consistent_lock, flags);
if (!res) {
c = vm_region_alloc(&consistent_head, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (c) {
pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
struct page *end = page + (1 << order);
pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | L_PTE_WRITE |
cache_flags);
/*
* Set the "dma handle"
......@@ -220,38 +217,43 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
page++;
}
ret = (void *)c->vm_start;
return (void *)c->vm_start;
}
no_remap:
if (ret == NULL) {
kfree(c);
if (page)
__free_pages(page, order);
}
no_page:
return ret;
return NULL;
}
EXPORT_SYMBOL(consistent_alloc);
/*
* Since we have the DMA mask available to us here, we could try to do
* a normal allocation, and only fall back to a "DMA" allocation if the
* resulting bus address does not satisfy the dma_mask requirements.
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
{
if (dev == NULL || *dev->dma_mask != 0xffffffff)
gfp |= GFP_DMA;
return consistent_alloc(gfp, size, handle, 0);
return __dma_alloc(dev, size, handle, gfp,
pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
* Allocate a writecombining region, in much the same way as
* dma_alloc_coherent above.
*/
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
{
return __dma_alloc(dev, size, handle, gfp,
pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
/*
* free a page as defined by the above mapping.
*/
void consistent_free(void *vaddr, size_t size, dma_addr_t handle)
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
struct vm_region *c;
unsigned long flags;
......@@ -260,15 +262,14 @@ void consistent_free(void *vaddr, size_t size, dma_addr_t handle)
size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags);
vm_region_dump(&consistent_head, "before free");
c = vm_region_find(&consistent_head, (unsigned long)vaddr);
c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
if (!c)
goto no_area;
if ((c->vm_end - c->vm_start) != size) {
printk(KERN_ERR "consistent_free: wrong size (%ld != %d)\n",
c->vm_end - c->vm_start, size);
printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
}
......@@ -292,15 +293,14 @@ void consistent_free(void *vaddr, size_t size, dma_addr_t handle)
}
}
printk(KERN_CRIT "consistent_free: bad page in kernel page "
"table\n");
printk(KERN_CRIT "%s: bad page in kernel page table\n",
__func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
list_del(&c->vm_list);
vm_region_dump(&consistent_head, "after free");
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(c);
......@@ -308,11 +308,11 @@ void consistent_free(void *vaddr, size_t size, dma_addr_t handle)
no_area:
spin_unlock_irqrestore(&consistent_lock, flags);
printk(KERN_ERR "consistent_free: trying to free "
"invalid area: %p\n", vaddr);
printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
}
EXPORT_SYMBOL(consistent_free);
EXPORT_SYMBOL(dma_free_coherent);
/*
* Initialise the consistent memory allocation.
......@@ -330,7 +330,7 @@ static int __init consistent_init(void)
pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
if (!pmd) {
printk(KERN_ERR "consistent_init: no pmd tables\n");
printk(KERN_ERR "%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
......@@ -338,7 +338,7 @@ static int __init consistent_init(void)
pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
if (!pte) {
printk(KERN_ERR "consistent_init: no pte tables\n");
printk(KERN_ERR "%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
......
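Taken together, the consistent.c changes above retire consistent_alloc()/consistent_free() in favour of a device-aware __dma_alloc() core with dma_alloc_coherent(), dma_alloc_writecombine() and dma_free_coherent() wrappers that check dev->coherent_dma_mask. A minimal caller of the new interface might look like the sketch below; it is illustrative only, not part of this commit, and the device pointer, structure and names are assumptions.
/*
 * Illustrative sketch only -- not part of this commit.  Assumes a valid
 * struct device whose coherent_dma_mask was already set up by its bus.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
struct my_ring {				/* hypothetical driver state */
	void		*cpu;			/* CPU (remapped) address */
	dma_addr_t	dma;			/* bus address for the device */
};
static int my_ring_alloc(struct device *dev, struct my_ring *ring)
{
	/* __dma_alloc() adds GFP_DMA itself when the coherent mask
	 * requires it, so plain GFP_KERNEL is enough here. */
	ring->cpu = dma_alloc_coherent(dev, PAGE_SIZE, &ring->dma, GFP_KERNEL);
	return ring->cpu ? 0 : -ENOMEM;
}
static void my_ring_free(struct device *dev, struct my_ring *ring)
{
	dma_free_coherent(dev, PAGE_SIZE, ring->cpu, ring->dma);
}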
......@@ -100,6 +100,10 @@ CONFIG_X86_MCE_P4THERMAL=y
# CONFIG_MICROCODE is not set
# CONFIG_X86_MSR is not set
# CONFIG_X86_CPUID is not set
#
# Firmware Drivers
#
# CONFIG_EDD is not set
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
......@@ -157,10 +161,12 @@ CONFIG_ACPI_SYSTEM=y
#
CONFIG_PCI=y
# CONFIG_PCI_GOBIOS is not set
# CONFIG_PCI_GOMMCONFIG is not set
# CONFIG_PCI_GODIRECT is not set
CONFIG_PCI_GOANY=y
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
# CONFIG_PCI_USE_VECTOR is not set
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
......@@ -237,8 +243,8 @@ CONFIG_BLK_DEV_FD=y
# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_CARMEL is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_LBD=y
#
......@@ -282,6 +288,7 @@ CONFIG_BLK_DEV_ADMA=y
# CONFIG_BLK_DEV_AEC62XX is not set
# CONFIG_BLK_DEV_ALI15X3 is not set
# CONFIG_BLK_DEV_AMD74XX is not set
# CONFIG_BLK_DEV_ATIIXP is not set
# CONFIG_BLK_DEV_CMD64X is not set
# CONFIG_BLK_DEV_TRIFLEX is not set
# CONFIG_BLK_DEV_CY82C693 is not set
......@@ -329,6 +336,12 @@ CONFIG_SCSI_REPORT_LUNS=y
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
#
# SCSI Transport Attributes
#
# CONFIG_SCSI_SPI_ATTRS is not set
# CONFIG_SCSI_FC_ATTRS is not set
#
# SCSI low-level drivers
#
......@@ -348,7 +361,9 @@ CONFIG_SCSI_SATA=y
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SIL is not set
# CONFIG_SCSI_SATA_VIA is not set
# CONFIG_SCSI_SATA_VITESSE is not set
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
......@@ -438,10 +453,6 @@ CONFIG_IEEE1394_RAWIO=y
#
# CONFIG_I2O is not set
#
# Macintosh device drivers
#
#
# Networking support
#
......@@ -463,7 +474,6 @@ CONFIG_IP_MULTICAST=y
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
......@@ -533,7 +543,6 @@ CONFIG_IP_NF_ARP_MANGLE=y
#
# SCTP Configuration (EXPERIMENTAL)
#
CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
# CONFIG_VLAN_8021Q is not set
......@@ -654,6 +663,7 @@ CONFIG_8139_RXBUF_IDX=2
# CONFIG_NET_FC is not set
# CONFIG_RCPCI is not set
# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
#
# Wan interfaces
......@@ -674,6 +684,8 @@ CONFIG_8139_RXBUF_IDX=2
# Bluetooth support
#
# CONFIG_BT is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
......@@ -720,6 +732,7 @@ CONFIG_SERIO_I8042=y
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
......@@ -728,6 +741,7 @@ CONFIG_MOUSE_PS2=y
# CONFIG_MOUSE_INPORT is not set
# CONFIG_MOUSE_LOGIBM is not set
# CONFIG_MOUSE_PC110PAD is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
......@@ -760,11 +774,6 @@ CONFIG_PRINTER=y
# CONFIG_LP_CONSOLE is not set
# CONFIG_PPDEV is not set
# CONFIG_TIPAR is not set
#
# Mice
#
# CONFIG_BUSMOUSE is not set
# CONFIG_QIC02_TAPE is not set
#
......@@ -817,6 +826,11 @@ CONFIG_DRM_I830=y
#
# CONFIG_I2C is not set
#
# Misc devices
#
# CONFIG_IBM_ASM is not set
#
# Multimedia devices
#
......@@ -849,6 +863,9 @@ CONFIG_SOUND=y
# Advanced Linux Sound Architecture
#
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
CONFIG_SND_RAWMIDI=y
CONFIG_SND_SEQUENCER=y
# CONFIG_SND_SEQ_DUMMY is not set
CONFIG_SND_OSSEMUL=y
......@@ -861,6 +878,7 @@ CONFIG_SND_SEQUENCER_OSS=y
#
# Generic devices
#
CONFIG_SND_MPU401_UART=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_VIRMIDI is not set
# CONFIG_SND_MTPAV is not set
......@@ -896,13 +914,19 @@ CONFIG_SND_SEQUENCER_OSS=y
#
# PCI devices
#
CONFIG_SND_AC97_CODEC=y
# CONFIG_SND_ALI5451 is not set
# CONFIG_SND_ATIIXP is not set
# CONFIG_SND_AU8810 is not set
# CONFIG_SND_AU8820 is not set
# CONFIG_SND_AU8830 is not set
# CONFIG_SND_AZT3328 is not set
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CS46XX is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_EMU10K1 is not set
# CONFIG_SND_KORG1212 is not set
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
# CONFIG_SND_RME32 is not set
# CONFIG_SND_RME96 is not set
......@@ -921,6 +945,7 @@ CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
CONFIG_SND_INTEL8X0=y
# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_SONICVIBES is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VX222 is not set
......@@ -952,6 +977,7 @@ CONFIG_USB_DEVICEFS=y
# USB Host Controller Drivers
#
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_OHCI_HCD is not set
CONFIG_USB_UHCI_HCD=y
......@@ -985,7 +1011,9 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_WACOM is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
# CONFIG_USB_MTOUCH is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
#
# USB Imaging devices
......@@ -1031,7 +1059,6 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_BRLVGER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_TEST is not set
......@@ -1120,11 +1147,12 @@ CONFIG_NFSD=y
CONFIG_LOCKD=y
CONFIG_EXPORTFS=y
CONFIG_SUNRPC=y
# CONFIG_SUNRPC_GSS is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
......
......@@ -60,6 +60,7 @@
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/highuid.h>
#include <linux/vmalloc.h>
#include <asm/mman.h>
#include <asm/types.h>
#include <asm/uaccess.h>
......
......@@ -34,9 +34,13 @@ extern int (*set_rtc)(void);
static struct i2c_client *rtc_client;
static const unsigned char days_in_mon[] =
{ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
static unsigned int rtc_epoch = 1900;
#define CMOS_CHECKSUM (63)
/*
* Acorn machines store the year in the static RAM at
* location 128.
*/
#define CMOS_YEAR (64 + 128)
static inline int rtc_command(int cmd, void *data)
......@@ -49,6 +53,38 @@ static inline int rtc_command(int cmd, void *data)
return ret;
}
/*
* Update the century + year bytes in the CMOS RAM, ensuring
* that the check byte is correctly adjusted for the change.
*/
static int rtc_update_year(unsigned int new_year)
{
unsigned char yr[2], chk;
struct mem cmos_year = { CMOS_YEAR, sizeof(yr), yr };
struct mem cmos_check = { CMOS_CHECKSUM, 1, &chk };
int ret;
ret = rtc_command(MEM_READ, &cmos_check);
if (ret)
goto out;
ret = rtc_command(MEM_READ, &cmos_year);
if (ret)
goto out;
chk -= yr[1] + yr[0];
yr[1] = new_year / 100;
yr[0] = new_year % 100;
chk += yr[1] + yr[0];
ret = rtc_command(MEM_WRITE, &cmos_year);
if (ret == 0)
ret = rtc_command(MEM_WRITE, &cmos_check);
out:
return ret;
}
/*
* Read the current RTC time and date, and update xtime.
*/
......@@ -56,45 +92,51 @@ static void get_rtc_time(struct rtc_tm *rtctm, unsigned int *year)
{
unsigned char ctrl, yr[2];
struct mem rtcmem = { CMOS_YEAR, sizeof(yr), yr };
int real_year, year_offset;
/*
* Ensure that the RTC is running.
*/
rtc_command(RTC_GETCTRL, &ctrl);
if (ctrl & 0xc0) {
unsigned char new_ctrl;
new_ctrl = ctrl & ~0xc0;
unsigned char new_ctrl = ctrl & ~0xc0;
printk("RTC: resetting control %02X -> %02X\n",
printk(KERN_WARNING "RTC: resetting control %02x -> %02x\n",
ctrl, new_ctrl);
rtc_command(RTC_SETCTRL, &new_ctrl);
}
/*
* Acorn machines store the year in
* the static RAM at location 192.
*/
if (rtc_command(MEM_READ, &rtcmem))
if (rtc_command(RTC_GETDATETIME, rtctm) ||
rtc_command(MEM_READ, &rtcmem))
return;
if (rtc_command(RTC_GETDATETIME, rtctm))
return;
real_year = yr[0];
*year = yr[1] * 100 + yr[0];
/*
* The RTC year holds the LSB two bits of the current
* year, which should reflect the LSB two bits of the
* CMOS copy of the year. Any difference indicates
* that we have to correct the CMOS version.
*/
year_offset = rtctm->year_off - (real_year & 3);
if (year_offset < 0)
/*
* RTC year wrapped. Adjust it appropriately.
*/
year_offset += 4;
*year = real_year + year_offset + yr[1] * 100;
}
static int set_rtc_time(struct rtc_tm *rtctm, unsigned int year)
{
unsigned char yr[2], leap, chk;
struct mem cmos_year = { CMOS_YEAR, sizeof(yr), yr };
struct mem cmos_check = { CMOS_CHECKSUM, 1, &chk };
unsigned char leap;
int ret;
leap = (!(year % 4) && (year % 100)) || !(year % 400);
if (rtctm->mon > 12 || rtctm->mday == 0)
if (rtctm->mon > 12 || rtctm->mon == 0 || rtctm->mday == 0)
return -EINVAL;
if (rtctm->mday > (days_in_mon[rtctm->mon] + (rtctm->mon == 2 && leap)))
......@@ -103,21 +145,16 @@ static int set_rtc_time(struct rtc_tm *rtctm, unsigned int year)
if (rtctm->hours >= 24 || rtctm->mins >= 60 || rtctm->secs >= 60)
return -EINVAL;
ret = rtc_command(RTC_SETDATETIME, rtctm);
if (ret == 0) {
rtc_command(MEM_READ, &cmos_check);
rtc_command(MEM_READ, &cmos_year);
chk -= yr[1] + yr[0];
yr[1] = year / 100;
yr[0] = year % 100;
/*
* The RTC's own 2-bit year must reflect the least
* significant two bits of the CMOS year.
*/
rtctm->year_off = (year % 100) & 3;
chk += yr[1] + yr[0];
ret = rtc_command(RTC_SETDATETIME, rtctm);
if (ret == 0)
ret = rtc_update_year(year);
rtc_command(MEM_WRITE, &cmos_year);
rtc_command(MEM_WRITE, &cmos_check);
}
return ret;
}
......@@ -189,13 +226,12 @@ static int rtc_ioctl(struct inode *inode, struct file *file,
rtc_raw.hours = rtctm.tm_hour;
rtc_raw.mday = rtctm.tm_mday;
rtc_raw.mon = rtctm.tm_mon + 1;
rtc_raw.year_off = 2;
year = rtctm.tm_year + 1900;
return set_rtc_time(&rtc_raw, year);
break;
case RTC_EPOCH_READ:
return put_user(rtc_epoch, (unsigned long *)arg);
return put_user(1900, (unsigned long *)arg);
}
return -EINVAL;
......
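The rewritten get_rtc_time() above relies on the RTC's 2-bit year mirroring the low two bits of the CMOS year byte, with a negative difference meaning the RTC has wrapped. A worked example with hypothetical values (not taken from the commit):
/*
 * Illustrative example only -- all values hypothetical.
 *
 *   CMOS year bytes:  yr[1] = 20 (century), yr[0] = 3    -> stored year 2003
 *   RTC 2-bit year:   rtctm->year_off = 0                -> wrapped past 2003
 *
 *   year_offset = year_off - (yr[0] & 3) = 0 - 3 = -3    (negative => wrap)
 *   year_offset += 4                                     ->  1
 *   *year = yr[0] + year_offset + yr[1] * 100 = 3 + 1 + 2000 = 2004
 *
 * set_rtc_time() later writes the corrected year back through
 * rtc_update_year(), which keeps the CMOS checksum byte consistent by
 * subtracting the old yr[0] + yr[1] sum and adding the new one.
 */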
......@@ -469,11 +469,8 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
driver_geo.heads = drv->heads;
driver_geo.sectors = drv->sectors;
driver_geo.cylinders = drv->cylinders;
} else {
driver_geo.heads = 0xff;
driver_geo.sectors = 0x3f;
driver_geo.cylinders = (int)drv->nr_blocks / (0xff*0x3f);
}
} else
return -ENXIO;
driver_geo.start= get_start_sect(inode->i_bdev);
if (copy_to_user((void *) arg, &driver_geo,
sizeof( struct hd_geometry)))
......
......@@ -1058,6 +1058,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
int devtype;
/* for each physical lun, do an inquiry */
if (ld_buff->LUN[i][3] & 0xC0) continue;
memset(inq_buff, 0, sizeof(InquiryData_struct));
memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
......
......@@ -615,7 +615,7 @@ static int __init viocd_init(void)
VIOCD_DEVICE_DEVFS "%d", deviceno);
gendisk->queue = viocd_queue;
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD;
gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
set_capacity(gendisk, 0);
gendisk->private_data = d;
d->viocd_disk = gendisk;
......
......@@ -77,7 +77,7 @@ config AGP_AMD64
config AGP_INTEL
tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
depends on AGP && X86
depends on AGP && X86 && !X86_64
help
This option gives you AGP support for the GLX component of XFree86 4.x
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
......@@ -88,6 +88,16 @@ config AGP_INTEL
use GLX or DRI, or if you have any Intel integrated graphics
chipsets. If unsure, say Y.
config AGP_INTEL_MCH
tristate "Intel i865 chipset support"
depends on AGP && X86
help
This option gives you AGP support for the GLX component of XFree86 4.x
on Intel chipsets that support Intel EM64T processors.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say Y.
config AGP_NVIDIA
tristate "NVIDIA nForce/nForce2 chipset support"
depends on AGP && X86 && !X86_64
......
......@@ -9,6 +9,7 @@ obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL_MCH) += intel-mch-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
......
/*
* AGPGART
* Copyright (C) 2002-2003 Dave Jones
* Copyright (C) 2002-2004 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
......@@ -167,8 +167,6 @@ struct agp_bridge_data {
#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
/* Chipset independent registers (from AGP Spec) */
#define AGP_APBASE 0x10
/* Intel registers */
#define INTEL_APSIZE 0xb4
......@@ -177,14 +175,6 @@ struct agp_bridge_data {
#define INTEL_NBXCFG 0x50
#define INTEL_ERRSTS 0x91
/* Intel 460GX Registers */
#define INTEL_I460_BAPBASE 0x98
#define INTEL_I460_GXBCTL 0xa0
#define INTEL_I460_AGPSIZ 0xa2
#define INTEL_I460_ATTBASE 0xfe200000
#define INTEL_I460_GATT_VALID (1UL << 24)
#define INTEL_I460_GATT_COHERENT (1UL << 25)
/* Intel i830 registers */
#define I830_GMCH_CTRL 0x52
#define I830_GMCH_ENABLED 0x4
......@@ -219,26 +209,10 @@ struct agp_bridge_data {
#define I852_GME 0x2
#define I852_GM 0x5
/* Intel 815 register */
#define INTEL_815_APCONT 0x51
#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
/* Intel i820 registers */
#define INTEL_I820_RDCR 0x51
#define INTEL_I820_ERRSTS 0xc8
/* Intel i840 registers */
#define INTEL_I840_MCHCFG 0x50
#define INTEL_I840_ERRSTS 0xc8
/* Intel i845 registers */
#define INTEL_I845_AGPM 0x51
#define INTEL_I845_ERRSTS 0xc8
/* Intel i850 registers */
#define INTEL_I850_MCHCFG 0x50
#define INTEL_I850_ERRSTS 0xc8
/* Intel i860 registers */
#define INTEL_I860_MCHCFG 0x50
#define INTEL_I860_ERRSTS 0xc8
......@@ -261,110 +235,6 @@ struct agp_bridge_data {
#define I810_DRAM_ROW_0 0x00000001
#define I810_DRAM_ROW_0_SDRAM 0x00000001
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
#define INTEL_I7505_NCAPID 0x60
#define INTEL_I7505_NISTAT 0x6c
#define INTEL_I7505_ATTBASE 0x78
#define INTEL_I7505_ERRSTS 0x42
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
/* VIA register */
#define VIA_GARTCTRL 0x80
#define VIA_APSIZE 0x84
#define VIA_ATTBASE 0x88
/* VIA KT400 */
#define VIA_AGP3_GARTCTRL 0x90
#define VIA_AGP3_APSIZE 0x94
#define VIA_AGP3_ATTBASE 0x98
#define VIA_AGPSEL 0xfd
/* SiS registers */
#define SIS_ATTBASE 0x90
#define SIS_APSIZE 0x94
#define SIS_TLBCNTRL 0x97
#define SIS_TLBFLUSH 0x98
/* AMD registers */
#define AMD_MMBASE 0x14
#define AMD_APSIZE 0xac
#define AMD_MODECNTL 0xb0
#define AMD_MODECNTL2 0xb2
#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
/* AMD64 registers */
#define AMD64_GARTAPERTURECTL 0x90
#define AMD64_GARTAPERTUREBASE 0x94
#define AMD64_GARTTABLEBASE 0x98
#define AMD64_GARTCACHECTL 0x9c
#define AMD64_GARTEN (1<<0)
/* ALi registers */
#define ALI_AGPCTRL 0xb8
#define ALI_ATTBASE 0xbc
#define ALI_TLBCTRL 0xc0
#define ALI_TAGCTRL 0xc4
#define ALI_CACHE_FLUSH_CTRL 0xD0
#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
#define ALI_CACHE_FLUSH_EN 0x100
/* ATI register */
#define ATI_GART_MMBASE_ADDR 0x14
#define ATI_RS100_APSIZE 0xac
#define ATI_RS300_APSIZE 0xf8
#define ATI_RS100_IG_AGPMODE 0xb0
#define ATI_RS300_IG_AGPMODE 0xfc
#define ATI_GART_FEATURE_ID 0x00
#define ATI_GART_BASE 0x04
#define ATI_GART_CACHE_SZBASE 0x08
#define ATI_GART_CACHE_CNTRL 0x0c
#define ATI_GART_CACHE_ENTRY_CNTRL 0x10
/* Serverworks Registers */
#define SVWRKS_APSIZE 0x10
#define SVWRKS_SIZE_MASK 0xfe000000
#define SVWRKS_MMBASE 0x14
#define SVWRKS_CACHING 0x4b
#define SVWRKS_FEATURE 0x68
/* func 1 registers */
#define SVWRKS_AGP_ENABLE 0x60
#define SVWRKS_COMMAND 0x04
/* Memory mapped registers */
#define SVWRKS_GART_CACHE 0x02
#define SVWRKS_GATTBASE 0x04
#define SVWRKS_TLBFLUSH 0x10
#define SVWRKS_POSTFLUSH 0x14
#define SVWRKS_DIRFLUSH 0x0c
/* HP ZX1 SBA registers */
#define HP_ZX1_CTRL 0x200
#define HP_ZX1_IBASE 0x300
#define HP_ZX1_IMASK 0x308
#define HP_ZX1_PCOM 0x310
#define HP_ZX1_TCNFG 0x318
#define HP_ZX1_PDIR_BASE 0x320
#define HP_ZX1_CACHE_FLUSH 0x428
/* NVIDIA registers */
#define NVIDIA_0_APSIZE 0x80
#define NVIDIA_1_WBC 0xf0
#define NVIDIA_2_GARTCTRL 0xd0
#define NVIDIA_2_APBASE 0xd8
#define NVIDIA_2_APLIMIT 0xdc
#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4)
#define NVIDIA_3_APBASE 0x50
#define NVIDIA_3_APLIMIT 0x54
struct agp_device_ids {
unsigned short device_id; /* first, to make table easier to read */
enum chipset_type chipset;
......@@ -402,21 +272,38 @@ void global_cache_flush(void);
void get_agp_version(struct agp_bridge_data *bridge);
unsigned long agp_generic_mask_memory(unsigned long addr, int type);
/* generic routines for agp>=3 */
int agp3_generic_fetch_size(void);
void agp3_generic_tlbflush(struct agp_memory *mem);
int agp3_generic_configure(void);
void agp3_generic_cleanup(void);
/* aperture sizes have been standardised since v3 */
#define AGP_GENERIC_SIZES_ENTRIES 11
extern struct aper_size_info_16 agp3_generic_sizes[];
extern int agp_off;
extern int agp_try_unsupported_boot;
/* Standard agp registers */
#define AGPSTAT 0x4
#define AGPCMD 0x8
#define AGPNISTAT 0xc
#define AGPCTRL 0x10
#define AGPNEPG 0x16
#define AGPNICMD 0x20
/* Chipset independent registers (from AGP Spec) */
#define AGP_APBASE 0x10
#define AGPSTAT 0x4
#define AGPCMD 0x8
#define AGPNISTAT 0xc
#define AGPCTRL 0x10
#define AGPAPSIZE 0x14
#define AGPNEPG 0x16
#define AGPGARTLO 0x18
#define AGPGARTHI 0x1c
#define AGPNICMD 0x20
#define AGP_MAJOR_VERSION_SHIFT (20)
#define AGP_MINOR_VERSION_SHIFT (16)
#define AGPSTAT_RQ_DEPTH (0xff000000)
#define AGPSTAT_RQ_DEPTH_SHIFT 24
#define AGPSTAT_CAL_MASK (1<<12|1<<11|1<<10)
#define AGPSTAT_ARQSZ (1<<15|1<<14|1<<13)
......@@ -435,4 +322,7 @@ extern int agp_try_unsupported_boot;
#define AGPSTAT3_8X (1<<1)
#define AGPSTAT3_4X (1)
#endif /* _AGP_BACKEND_PRIV_H */
#define AGPCTRL_APERENB (1<<8)
#define AGPCTRL_GTLBEN (1<<7)
#endif /* _AGP_BACKEND_PRIV_H */
......@@ -9,6 +9,14 @@
#include <linux/agp_backend.h>
#include "agp.h"
#define ALI_AGPCTRL 0xb8
#define ALI_ATTBASE 0xbc
#define ALI_TLBCTRL 0xc0
#define ALI_TAGCTRL 0xc4
#define ALI_CACHE_FLUSH_CTRL 0xD0
#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
#define ALI_CACHE_FLUSH_EN 0x100
static int ali_fetch_size(void)
{
int i;
......
......@@ -11,6 +11,15 @@
#include <linux/mm.h>
#include "agp.h"
#define AMD_MMBASE 0x14
#define AMD_APSIZE 0xac
#define AMD_MODECNTL 0xb0
#define AMD_MODECNTL2 0xb2
#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
struct amd_page_map {
unsigned long *real;
unsigned long *remapped;
......
......@@ -31,6 +31,13 @@
#define INVGART (1<<0)
#define GARTPTEERR (1<<1)
/* K8 On-cpu GART registers */
#define AMD64_GARTAPERTURECTL 0x90
#define AMD64_GARTAPERTUREBASE 0x94
#define AMD64_GARTTABLEBASE 0x98
#define AMD64_GARTCACHECTL 0x9c
#define AMD64_GARTEN (1<<0)
/* NVIDIA K8 registers */
#define NVIDIA_X86_64_0_APBASE 0x10
#define NVIDIA_X86_64_1_APBASE1 0x50
......@@ -382,8 +389,9 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data
case 0x11: revstring="B0"; break;
case 0x12: revstring="B1"; break;
case 0x13: revstring="B2"; break;
case 0x14: revstring="B3"; break;
default: revstring="??"; break;
}
}
printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring);
......
......@@ -10,6 +10,18 @@
#include <asm/agp.h>
#include "agp.h"
#define ATI_GART_MMBASE_ADDR 0x14
#define ATI_RS100_APSIZE 0xac
#define ATI_RS100_IG_AGPMODE 0xb0
#define ATI_RS300_APSIZE 0xf8
#define ATI_RS300_IG_AGPMODE 0xfc
#define ATI_GART_FEATURE_ID 0x00
#define ATI_GART_BASE 0x04
#define ATI_GART_CACHE_SZBASE 0x08
#define ATI_GART_CACHE_CNTRL 0x0c
#define ATI_GART_CACHE_ENTRY_CNTRL 0x10
static struct aper_size_info_lvl2 ati_generic_sizes[7] =
{
{2048, 524288, 0x0000000c},
......
......@@ -880,7 +880,7 @@ EXPORT_SYMBOL(agp_generic_free_by_type);
void *agp_generic_alloc_page(void)
{
struct page * page;
page = alloc_page(GFP_KERNEL);
if (page == NULL)
return 0;
......@@ -956,3 +956,85 @@ unsigned long agp_generic_mask_memory(unsigned long addr, int type)
}
EXPORT_SYMBOL(agp_generic_mask_memory);
/*
* These functions are implemented according to the AGPv3 spec,
* which covers implementation details that had previously been
* left open.
*/
int agp3_generic_fetch_size(void)
{
u16 temp_size;
int i;
struct aper_size_info_16 *values;
pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp_size == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);
void agp3_generic_tlbflush(struct agp_memory *mem)
{
u32 ctrl;
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);
int agp3_generic_configure(void)
{
u32 temp;
struct aper_size_info_16 *current_size;
current_size = A_SIZE_16(agp_bridge->current_size);
pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
/* set aperture size */
pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
/* set gart pointer */
pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
/* enable aperture and GTLB */
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);
void agp3_generic_cleanup(void)
{
u32 ctrl;
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);
struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
{4096, 1048576, 10,0x000},
{2048, 524288, 9, 0x800},
{1024, 262144, 8, 0xc00},
{ 512, 131072, 7, 0xe00},
{ 256, 65536, 6, 0xf00},
{ 128, 32768, 5, 0xf20},
{ 64, 16384, 4, 0xf30},
{ 32, 8192, 3, 0xf38},
{ 16, 4096, 2, 0xf3c},
{ 8, 2048, 1, 0xf3e},
{ 4, 1024, 0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
......@@ -14,6 +14,13 @@
#include "agp.h"
#define INTEL_I460_BAPBASE 0x98
#define INTEL_I460_GXBCTL 0xa0
#define INTEL_I460_AGPSIZ 0xa2
#define INTEL_I460_ATTBASE 0xfe200000
#define INTEL_I460_GATT_VALID (1UL << 24)
#define INTEL_I460_GATT_COHERENT (1UL << 25)
/*
* The i460 can operate with large (4MB) pages, but there is no sane way to support this
* within the current kernel/DRM environment, so we disable the relevant code for now.
......
......@@ -13,6 +13,31 @@
#include <linux/agp_backend.h>
#include "agp.h"
/* Intel 815 register */
#define INTEL_815_APCONT 0x51
#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
/* Intel i820 registers */
#define INTEL_I820_RDCR 0x51
#define INTEL_I820_ERRSTS 0xc8
/* Intel i840 registers */
#define INTEL_I840_MCHCFG 0x50
#define INTEL_I840_ERRSTS 0xc8
/* Intel i850 registers */
#define INTEL_I850_MCHCFG 0x50
#define INTEL_I850_ERRSTS 0xc8
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
#define INTEL_I7505_NCAPID 0x60
#define INTEL_I7505_NISTAT 0x6c
#define INTEL_I7505_ATTBASE 0x78
#define INTEL_I7505_ERRSTS 0x42
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
static struct aper_size_info_fixed intel_i810_sizes[] =
{
{64, 16384, 4},
......@@ -942,7 +967,7 @@ static struct aper_size_info_8 intel_830mp_sizes[4] =
{32, 8192, 3, 56}
};
struct agp_bridge_driver intel_generic_driver = {
static struct agp_bridge_driver intel_generic_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_generic_sizes,
.size_type = U16_APER_SIZE,
......@@ -965,7 +990,7 @@ struct agp_bridge_driver intel_generic_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_810_driver = {
static struct agp_bridge_driver intel_810_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_i810_sizes,
.size_type = FIXED_APER_SIZE,
......@@ -989,8 +1014,7 @@ struct agp_bridge_driver intel_810_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_815_driver = {
static struct agp_bridge_driver intel_815_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_815_sizes,
.size_type = U8_APER_SIZE,
......@@ -1013,7 +1037,7 @@ struct agp_bridge_driver intel_815_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_830_driver = {
static struct agp_bridge_driver intel_830_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
......@@ -1037,8 +1061,7 @@ struct agp_bridge_driver intel_830_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_820_driver = {
static struct agp_bridge_driver intel_820_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
......@@ -1061,7 +1084,7 @@ struct agp_bridge_driver intel_820_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_830mp_driver = {
static struct agp_bridge_driver intel_830mp_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_830mp_sizes,
.size_type = U8_APER_SIZE,
......@@ -1084,7 +1107,7 @@ struct agp_bridge_driver intel_830mp_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_840_driver = {
static struct agp_bridge_driver intel_840_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
......@@ -1107,7 +1130,7 @@ struct agp_bridge_driver intel_840_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_845_driver = {
static struct agp_bridge_driver intel_845_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
......@@ -1130,7 +1153,7 @@ struct agp_bridge_driver intel_845_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_850_driver = {
static struct agp_bridge_driver intel_850_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
......@@ -1153,7 +1176,7 @@ struct agp_bridge_driver intel_850_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_860_driver = {
static struct agp_bridge_driver intel_860_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
......@@ -1176,7 +1199,7 @@ struct agp_bridge_driver intel_860_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
struct agp_bridge_driver intel_7505_driver = {
static struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
......
......@@ -13,6 +13,17 @@
#include <linux/mm.h>
#include "agp.h"
/* NVIDIA registers */
#define NVIDIA_0_APSIZE 0x80
#define NVIDIA_1_WBC 0xf0
#define NVIDIA_2_GARTCTRL 0xd0
#define NVIDIA_2_APBASE 0xd8
#define NVIDIA_2_APLIMIT 0xdc
#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4)
#define NVIDIA_3_APBASE 0x50
#define NVIDIA_3_APLIMIT 0x54
static struct _nvidia_private {
struct pci_dev *dev_1;
struct pci_dev *dev_2;
......
......@@ -8,6 +8,12 @@
#include <linux/agp_backend.h>
#include "agp.h"
#define SIS_ATTBASE 0x90
#define SIS_APSIZE 0x94
#define SIS_TLBCNTRL 0x97
#define SIS_TLBFLUSH 0x98
static int sis_fetch_size(void)
{
u8 temp_size;
......@@ -61,6 +67,42 @@ static void sis_cleanup(void)
(previous_size->size_value & ~(0x03)));
}
static void sis_648_enable(u32 mode)
{
struct pci_dev *device = NULL;
u32 command;
int rate;
printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
agp_bridge->major_version,
agp_bridge->minor_version,
agp_bridge->dev->slot_name);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
command = agp_collect_device_status(mode, command);
command |= AGPSTAT_AGP_ENABLE;
rate = (command & 0x7) << 2;
while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL) {
u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
if (!agp)
continue;
printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n",
pci_name(device), rate);
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
if(device->device == PCI_DEVICE_ID_SI_648) {
// weird: on 648 and 648fx chipsets any rate change in the target command register
// triggers a 5ms screwup during which the master cannot be configured
printk(KERN_INFO PFX "sis 648 agp fix - giving bridge time to recover\n");
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout (1+(HZ*10)/1000);
}
}
}
static struct aper_size_info_8 sis_generic_sizes[7] =
{
{256, 65536, 6, 99},
......@@ -176,6 +218,26 @@ static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
{ }, /* dummy final entry, always present */
};
static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
{
if (bridge->dev->device == PCI_DEVICE_ID_SI_648) {
if (agp_bridge->major_version == 3 && agp_bridge->minor_version < 5) {
sis_driver.agp_enable=sis_648_enable;
} else {
sis_driver.agp_enable = sis_648_enable;
sis_driver.aperture_sizes = agp3_generic_sizes;
sis_driver.size_type = U16_APER_SIZE;
sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
sis_driver.configure = agp3_generic_configure;
sis_driver.fetch_size = agp3_generic_fetch_size;
sis_driver.cleanup = agp3_generic_cleanup;
sis_driver.tlb_flush = agp3_generic_tlbflush;
}
}
}
static int __devinit agp_sis_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
......@@ -210,10 +272,11 @@ static int __devinit agp_sis_probe(struct pci_dev *pdev,
bridge->dev = pdev;
bridge->capndx = cap_ptr;
get_agp_version(bridge);
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
sis_get_driver(bridge);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
......
......@@ -8,6 +8,23 @@
#include <linux/agp_backend.h>
#include "agp.h"
#define SVWRKS_COMMAND 0x04
#define SVWRKS_APSIZE 0x10
#define SVWRKS_MMBASE 0x14
#define SVWRKS_CACHING 0x4b
#define SVWRKS_AGP_ENABLE 0x60
#define SVWRKS_FEATURE 0x68
#define SVWRKS_SIZE_MASK 0xfe000000
/* Memory mapped registers */
#define SVWRKS_GART_CACHE 0x02
#define SVWRKS_GATTBASE 0x04
#define SVWRKS_TLBFLUSH 0x10
#define SVWRKS_POSTFLUSH 0x14
#define SVWRKS_DIRFLUSH 0x0c
struct serverworks_page_map {
unsigned long *real;
unsigned long *remapped;
......@@ -454,10 +471,17 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
}
switch (pdev->device) {
case 0x0006:
/* ServerWorks CNB20HE
Fail silently.*/
printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
return -ENODEV;
case PCI_DEVICE_ID_SERVERWORKS_HE:
case PCI_DEVICE_ID_SERVERWORKS_LE:
case 0x0007:
break;
default:
printk(KERN_ERR PFX "Unsupported Serverworks chipset "
"(device id: %04x)\n", pdev->device);
......
......@@ -9,6 +9,15 @@
#include <linux/agp_backend.h>
#include "agp.h"
#define VIA_GARTCTRL 0x80
#define VIA_APSIZE 0x84
#define VIA_ATTBASE 0x88
#define VIA_AGP3_GARTCTRL 0x90
#define VIA_AGP3_APSIZE 0x94
#define VIA_AGP3_ATTBASE 0x98
#define VIA_AGPSEL 0xfd
static int via_fetch_size(void)
{
int i;
......
......@@ -3178,13 +3178,14 @@ DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static void md_do_sync(mddev_t *mddev)
{
mddev_t *mddev2;
unsigned int max_sectors, currspeed = 0,
j, window;
unsigned int currspeed = 0,
window;
sector_t max_sectors,j;
unsigned long mark[SYNC_MARKS];
unsigned long mark_cnt[SYNC_MARKS];
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
unsigned long last_check;
sector_t last_check;
/* just in case thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
......@@ -3253,8 +3254,8 @@ static void md_do_sync(mddev_t *mddev)
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n",
window/2,max_sectors/2);
printk(KERN_INFO "md: using %dk window, over a total of %Lu blocks.\n",
window/2,(unsigned long long) max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
init_waitqueue_head(&mddev->recovery_wait);
......@@ -3322,7 +3323,7 @@ static void md_do_sync(mddev_t *mddev)
*/
cond_resched();
currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
currspeed = ((unsigned long)(j-mddev->resync_mark_cnt))/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > sysctl_speed_limit_min) {
if ((currspeed > sysctl_speed_limit_max) ||
......
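With j, max_sectors and resync_mark_cnt now sector_t, the speed calculation above casts the difference back to unsigned long so the divisions stay in 32-bit arithmetic; the formula itself still converts 512-byte sectors to KB and divides by elapsed seconds. A hypothetical numeric check:
/*
 * Illustrative arithmetic only -- hypothetical numbers.
 *
 *   j - resync_mark_cnt        = 204800 sectors resynced since the last mark
 *   (jiffies - resync_mark)/HZ = 9 seconds elapsed
 *
 *   currspeed = 204800/2 / (9 + 1) + 1 = 102400 / 10 + 1 = 10241 KB/s
 */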
......@@ -181,7 +181,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
static void raid5_build_block (struct stripe_head *sh, int i);
static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int pd_idx)
static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
{
raid5_conf_t *conf = sh->raid_conf;
int disks = conf->raid_disks, i;
......@@ -218,25 +218,25 @@ static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int
insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, unsigned long sector)
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
{
struct stripe_head *sh;
CHECK_DEVLOCK();
PRINTK("__find_stripe, sector %lu\n", sector);
PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
if (sh->sector == sector)
return sh;
PRINTK("__stripe %lu not in cache\n", sector);
PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
return NULL;
}
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, unsigned long sector,
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
int pd_idx, int noblock)
{
struct stripe_head *sh;
PRINTK("get_stripe, sector %lu\n", sector);
PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(&conf->device_lock);
......@@ -495,7 +495,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* Input: a 'big' sector number,
* Output: index of the data and parity disk, and the sector # in them.
*/
static unsigned long raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
unsigned int data_disks, unsigned int * dd_idx,
unsigned int * pd_idx, raid5_conf_t *conf)
{
......@@ -556,7 +556,7 @@ static unsigned long raid5_compute_sector(sector_t r_sector, unsigned int raid_d
/*
* Finally, compute the new sector number
*/
new_sector = stripe * sectors_per_chunk + chunk_offset;
new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
return new_sector;
}
......@@ -567,7 +567,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
sector_t new_sector = sh->sector, check;
int sectors_per_chunk = conf->chunk_size >> 9;
long stripe;
sector_t stripe;
int chunk_offset;
int chunk_number, dummy1, dummy2, dd_idx = i;
sector_t r_sector;
......@@ -1388,7 +1388,7 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
unsigned long stripe;
int chunk_offset;
int dd_idx, pd_idx;
unsigned long first_sector;
sector_t first_sector;
int raid_disks = conf->raid_disks;
int data_disks = raid_disks-1;
......@@ -1401,7 +1401,7 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
stripe = x;
BUG_ON(x != stripe);
first_sector = raid5_compute_sector(stripe*data_disks*sectors_per_chunk
first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
if (sh == NULL) {
......
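The (sector_t) casts added to raid5_compute_sector() and sync_request() above matter because stripe * sectors_per_chunk was previously multiplied in 32-bit arithmetic and silently wrapped on big arrays; the raid6 hunk below makes the same change. A hypothetical example of the wrap:
/*
 * Illustrative overflow example -- hypothetical values.
 *
 *   sectors_per_chunk = 128 (64 KB chunks), stripe = 40000000
 *
 *   32-bit:  40000000 * 128 = 5120000000, which wraps modulo 2^32
 *            to 825032704 -- the wrong sector once targets lie
 *            beyond 2 TiB (2^32 sectors of 512 bytes).
 *   64-bit:  (sector_t)40000000 * 128 = 5120000000, as intended.
 */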
......@@ -200,7 +200,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
static void raid6_build_block (struct stripe_head *sh, int i);
static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int pd_idx)
static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
{
raid6_conf_t *conf = sh->raid_conf;
int disks = conf->raid_disks, i;
......@@ -237,25 +237,25 @@ static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int
insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid6_conf_t *conf, unsigned long sector)
static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector)
{
struct stripe_head *sh;
CHECK_DEVLOCK();
PRINTK("__find_stripe, sector %lu\n", sector);
PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
if (sh->sector == sector)
return sh;
PRINTK("__stripe %lu not in cache\n", sector);
PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
return NULL;
}
static struct stripe_head *get_active_stripe(raid6_conf_t *conf, unsigned long sector,
static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector,
int pd_idx, int noblock)
{
struct stripe_head *sh;
PRINTK("get_stripe, sector %lu\n", sector);
PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(&conf->device_lock);
......@@ -516,7 +516,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* Input: a 'big' sector number,
* Output: index of the data and parity disk, and the sector # in them.
*/
static unsigned long raid6_compute_sector(sector_t r_sector, unsigned int raid_disks,
static sector_t raid6_compute_sector(sector_t r_sector, unsigned int raid_disks,
unsigned int data_disks, unsigned int * dd_idx,
unsigned int * pd_idx, raid6_conf_t *conf)
{
......@@ -588,7 +588,7 @@ static unsigned long raid6_compute_sector(sector_t r_sector, unsigned int raid_d
/*
* Finally, compute the new sector number
*/
new_sector = stripe * sectors_per_chunk + chunk_offset;
new_sector = (sector_t) stripe * sectors_per_chunk + chunk_offset;
return new_sector;
}
......@@ -599,7 +599,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
int raid_disks = conf->raid_disks, data_disks = raid_disks - 2;
sector_t new_sector = sh->sector, check;
int sectors_per_chunk = conf->chunk_size >> 9;
long stripe;
sector_t stripe;
int chunk_offset;
int chunk_number, dummy1, dummy2, dd_idx = i;
sector_t r_sector;
......@@ -1550,7 +1550,7 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
unsigned long stripe;
int chunk_offset;
int dd_idx, pd_idx;
unsigned long first_sector;
sector_t first_sector;
int raid_disks = conf->raid_disks;
int data_disks = raid_disks - 2;
......@@ -1563,7 +1563,7 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
stripe = x;
BUG_ON(x != stripe);
first_sector = raid6_compute_sector(stripe*data_disks*sectors_per_chunk
first_sector = raid6_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
if (sh == NULL) {
......
......@@ -27,7 +27,7 @@
#include <asm/current.h>
#include <asm/uaccess.h>
#define DEBUG /* undef me for production */
#undef DEBUG
#ifdef DEBUG
#define DPRINTK(stuff...) printk (stuff)
......
......@@ -251,6 +251,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd,
tf->lbam = scsicmd[2];
tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
tf->device |= ATA_LBA;
VPRINTK("six-byte command\n");
return 0;
}
......
......@@ -29,6 +29,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/hardware.h>
#include <asm/io.h>
......@@ -1254,6 +1256,11 @@ free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
printk("acornfb: freed %dK memory\n", mb_freed);
}
static struct device acornfb_device = {
.bus_id = "acornfb",
.coherent_dma_mask = 0xffffffff,
};
int __init
acornfb_init(void)
{
......@@ -1263,6 +1270,8 @@ acornfb_init(void)
acornfb_init_fbinfo();
current_par.dev = &acornfb_device;
if (current_par.montype == -1)
current_par.montype = acornfb_detect_monitortype();
......@@ -1323,37 +1332,30 @@ acornfb_init(void)
#if defined(HAS_VIDC20)
if (!current_par.using_vram) {
dma_addr_t handle;
void *base;
/*
* RiscPC needs to allocate the DRAM memory
* for the framebuffer if we are not using
* VRAM. Archimedes/A5000 machines use a
* fixed address for their framebuffers.
* VRAM.
*/
unsigned long page, top, base;
int order = get_order(size);
base = __get_free_pages(GFP_KERNEL, order);
if (base == 0) {
base = dma_alloc_writecombine(current_par.dev, size, &handle,
GFP_KERNEL);
if (base == NULL) {
printk(KERN_ERR "acornfb: unable to allocate screen "
"memory\n");
return -ENOMEM;
}
top = base + (PAGE_SIZE << order);
/* Mark the framebuffer pages as reserved so mmap will work. */
for (page = base; page < PAGE_ALIGN(base + size); page += PAGE_SIZE)
SetPageReserved(virt_to_page(page));
/* Hand back any excess pages that we allocated. */
for (page = base + size; page < top; page += PAGE_SIZE)
free_page(page);
fb_info.screen_base = (char *)base;
fb_info.fix.smem_start = virt_to_phys(fb_info.screen_base);
fb_info.screen_base = base;
fb_info.fix.smem_start = handle;
}
#endif
#if defined(HAS_VIDC)
/*
* Free unused pages
* Archimedes/A5000 machines use a fixed address for their
* framebuffers. Free unused pages
*/
free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE);
#endif
......
......@@ -47,6 +47,7 @@ union palette {
};
struct acornfb_par {
struct device *dev;
unsigned long screen_end;
unsigned int dram_size;
unsigned int vram_half_sam;
......
......@@ -1595,12 +1595,18 @@ static int __init sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
* of the framebuffer.
*/
fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE);
fbi->map_cpu = consistent_alloc(GFP_KERNEL, fbi->map_size,
&fbi->map_dma, PTE_BUFFERABLE);
fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
&fbi->map_dma, GFP_KERNEL);
if (fbi->map_cpu) {
fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE;
fbi->screen_dma = fbi->map_dma + PAGE_SIZE;
/*
* FIXME: this is actually the wrong thing to place in
* smem_start. But fbdev suffers from the problem that
* it needs an API which doesn't exist (in this case,
* dma_writecombine_mmap)
*/
fbi->fb.fix.smem_start = fbi->screen_dma;
}
......@@ -1613,7 +1619,7 @@ static struct fb_monspecs monspecs __initdata = {
};
static struct sa1100fb_info * __init sa1100fb_init_fbinfo(void)
static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
{
struct sa1100fb_mach_info *inf;
struct sa1100fb_info *fbi;
......@@ -1624,6 +1630,7 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(void)
return NULL;
memset(fbi, 0, sizeof(struct sa1100fb_info));
fbi->dev = dev;
strcpy(fbi->fb.fix.id, SA1100_NAME);
......@@ -1703,7 +1710,7 @@ static int __init sa1100fb_probe(struct device *dev)
if (!request_mem_region(0xb0100000, 0x10000, "LCD"))
return -EBUSY;
fbi = sa1100fb_init_fbinfo();
fbi = sa1100fb_init_fbinfo(dev);
ret = -ENOMEM;
if (!fbi)
goto failed;
......
......@@ -63,6 +63,7 @@ struct sa1100fb_lcd_reg {
struct sa1100fb_info {
struct fb_info fb;
struct device *dev;
struct sa1100fb_rgb *rgb[NR_RGB];
u_int max_bpp;
......
......@@ -44,6 +44,13 @@ extern struct super_block *blockdev_superblock;
*
* This function *must* be atomic for the I_DIRTY_PAGES case -
* set_page_dirty() is called under spinlock in several places.
*
* Note that for blockdevs, inode->dirtied_when represents the dirtying time of
* the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
* the kernel-internal blockdev inode represents the dirtying time of the
* blockdev's pages. This is why for I_DIRTY_PAGES we always use
* page->mapping->host, so the page-dirtying time is recorded in the internal
* blockdev inode.
*/
void __mark_inode_dirty(struct inode *inode, int flags)
{
......@@ -71,7 +78,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
spin_lock(&inode_lock);
if ((inode->i_state & flags) != flags) {
const int was_dirty = inode->i_state & I_DIRTY;
struct address_space *mapping = inode->i_mapping;
inode->i_state |= flags;
......@@ -99,7 +105,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* reposition it (that would break s_dirty time-ordering).
*/
if (!was_dirty) {
mapping->dirtied_when = jiffies;
inode->dirtied_when = jiffies;
list_move(&inode->i_list, &sb->s_dirty);
}
}
......@@ -176,11 +182,11 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
} else if (!list_empty(&mapping->dirty_pages)) {
/* Redirtied */
inode->i_state |= I_DIRTY_PAGES;
mapping->dirtied_when = jiffies;
inode->dirtied_when = jiffies;
list_move(&inode->i_list, &sb->s_dirty);
} else if (inode->i_state & I_DIRTY) {
/* Redirtied */
mapping->dirtied_when = jiffies;
inode->dirtied_when = jiffies;
list_move(&inode->i_list, &sb->s_dirty);
} else if (atomic_read(&inode->i_count)) {
list_move(&inode->i_list, &inode_in_use);
......@@ -220,7 +226,7 @@ __writeback_single_inode(struct inode *inode,
* Write out a superblock's list of dirty inodes. A wait will be performed
* upon no inodes, all inodes or the final one, depending upon sync_mode.
*
* If older_than_this is non-NULL, then only write out mappings which
* If older_than_this is non-NULL, then only write out inodes which
* had their first dirtying at a time earlier than *older_than_this.
*
* If we're a pdflush thread, then implement pdflush collision avoidance
......@@ -292,11 +298,11 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
}
/* Was this inode dirtied after sync_sb_inodes was called? */
if (time_after(mapping->dirtied_when, start))
if (time_after(inode->dirtied_when, start))
break;
/* Was this inode dirtied too recently? */
if (wbc->older_than_this && time_after(mapping->dirtied_when,
if (wbc->older_than_this && time_after(inode->dirtied_when,
*wbc->older_than_this))
break;
......@@ -308,7 +314,7 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
__iget(inode);
__writeback_single_inode(inode, wbc);
if (wbc->sync_mode == WB_SYNC_HOLD) {
mapping->dirtied_when = jiffies;
inode->dirtied_when = jiffies;
list_move(&inode->i_list, &sb->s_dirty);
}
if (current_is_pdflush())
......
......@@ -132,6 +132,7 @@ static struct inode *alloc_inode(struct super_block *sb)
inode->i_cdev = NULL;
inode->i_rdev = 0;
inode->i_security = NULL;
inode->dirtied_when = 0;
if (security_inode_alloc(inode)) {
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
......@@ -144,7 +145,6 @@ static struct inode *alloc_inode(struct super_block *sb)
mapping->host = inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
mapping->dirtied_when = 0;
mapping->assoc_mapping = NULL;
mapping->backing_dev_info = &default_backing_dev_info;
if (sb->s_bdev)
......
......@@ -14,8 +14,6 @@
* devices. This is the "generic" version. The PCI specific version
* is in pci.h
*/
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle, unsigned long flags);
extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *kaddr, size_t size, int rw);
/*
......@@ -99,12 +97,26 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
static inline void
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle)
{
consistent_free(cpu_addr, size, handle);
}
dma_addr_t handle);
/**
* dma_alloc_writecombine - allocate writecombining memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
*
* Allocate some uncached, buffered memory for a device for
* performing DMA. This function allocates pages, and will
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
/**
* dma_map_single - map a single buffer for streaming DMA
......
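The header change turns dma_free_coherent() into an out-of-line function and adds the dma_alloc_writecombine()/dma_free_writecombine() pair. A minimal driver-side sketch of how the new pair might be used follows; the device pointer, buffer size and probe/remove functions are hypothetical, and only the two calls with their argument order come from the header above.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define FRAMEBUF_SIZE	(320 * 240 * 2)	/* hypothetical LCD framebuffer */

static void *fb_virt;
static dma_addr_t fb_dma;

static int example_probe(struct device *dev)
{
	/* CPU-viewed address is returned, device-viewed address lands in
	 * fb_dma; the mapping is uncached but write-buffered. */
	fb_virt = dma_alloc_writecombine(dev, FRAMEBUF_SIZE, &fb_dma, GFP_KERNEL);
	if (!fb_virt)
		return -ENOMEM;
	return 0;
}

static void example_remove(struct device *dev)
{
	dma_free_writecombine(dev, FRAMEBUF_SIZE, fb_virt, fb_dma);
}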
......@@ -41,19 +41,8 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port,
hw->io_ports[IDE_IRQ_OFFSET] = 0;
}
static __inline__ void ide_init_default_hwifs(void)
{
#ifndef CONFIG_PCI
hw_regs_t hw;
int index;
for(index = 0; index < MAX_HWIFS; index++) {
ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
hw.irq = ide_default_irq(ide_default_io_base(index));
ide_register_hw(&hw);
}
#endif
}
/* There are no standard ports. */
static inline void ide_init_default_hwifs(void) { ; }
#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
......
......@@ -53,24 +53,8 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_por
hw->io_ports[IDE_IRQ_OFFSET] = 0;
}
/*
* This registers the standard ports for this architecture with the IDE
* driver.
*/
static __inline__ void ide_init_default_hwifs(void)
{
#ifndef CONFIG_PCI
hw_regs_t hw;
int index;
for (index = 0; index < MAX_HWIFS; index++) {
memset(&hw, 0, sizeof hw);
ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
hw.irq = ide_default_irq(ide_default_io_base(index));
ide_register_hw(&hw, NULL);
}
#endif
}
/* There are no standard ports. */
static inline void ide_init_default_hwifs(void) { ; }
#define __ide_insl(data_reg, buffer, wcount) \
__ide_insw(data_reg, buffer, (wcount)<<1)
......
......@@ -54,24 +54,8 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_por
hw->io_ports[IDE_IRQ_OFFSET] = 0;
}
/*
* This registers the standard ports for this architecture with the IDE
* driver.
*/
static __inline__ void ide_init_default_hwifs(void)
{
#ifndef CONFIG_BLK_DEV_IDEPCI
hw_regs_t hw;
int index;
for (index = 0; index < MAX_HWIFS; index++) {
memset(&hw, 0, sizeof hw);
ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
hw.irq = ide_default_irq(ide_default_io_base(index));
ide_register_hw(&hw, NULL);
}
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
/* There are no standard ports. */
static inline void ide_init_default_hwifs(void) { ; }
#define __ide_insl(data_reg, buffer, wcount) \
__ide_insw(data_reg, buffer, (wcount)<<1)
......
......@@ -333,7 +333,6 @@ struct address_space {
struct list_head i_mmap_shared; /* list of shared mappings */
struct semaphore i_shared_sem; /* protect both above lists */
atomic_t truncate_count; /* Cover race condition with truncate */
unsigned long dirtied_when; /* jiffies of first page dirtying */
unsigned long flags; /* error bits/gfp mask */
struct backing_dev_info *backing_dev_info; /* device readahead, etc */
spinlock_t private_lock; /* for use by the address_space */
......@@ -416,6 +415,7 @@ struct inode {
struct dnotify_struct *i_dnotify; /* for directory notifications */
unsigned long i_state;
unsigned long dirtied_when; /* jiffies of first dirtying */
unsigned int i_flags;
unsigned char i_sock;
......
......@@ -24,8 +24,6 @@
#include <asm/io.h>
#include <asm/semaphore.h>
#define DEBUG_PM
/*
* This is the multiple IDE interface driver, as evolved from hd.c.
* It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
......
......@@ -212,9 +212,9 @@ struct mddev_s
struct mdk_thread_s *thread; /* management thread */
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
unsigned long curr_resync; /* blocks scheduled */
sector_t curr_resync; /* blocks scheduled */
unsigned long resync_mark; /* a recent timestamp */
unsigned long resync_mark_cnt;/* blocks written at resync_mark */
sector_t resync_mark_cnt;/* blocks written at resync_mark */
/* recovery/resync flags
* NEEDED: we might need to start a resync/recover
......
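curr_resync and resync_mark_cnt switch from unsigned long to sector_t so the resync counters cannot wrap on 32-bit hosts once an array grows past 2^32 sectors. A quick stand-alone arithmetic check of that limit; the 3 TB figure is only an example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bytes   = 3000ULL * 1000 * 1000 * 1000;	/* a ~3 TB array */
	uint64_t sectors = bytes / 512;				/* 512-byte sectors */

	printf("sectors needed : %llu\n", (unsigned long long)sectors);
	printf("32-bit maximum : %lu\n", 4294967295UL);
	/* ~5.86e9 sectors exceeds 2^32 - 1, so a 32-bit counter would wrap. */
	return 0;
}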
......@@ -168,7 +168,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
int bytes, ret, offset;
void *maddr;
ret = get_user_pages(current, mm, addr, 1,
ret = get_user_pages(tsk, mm, addr, 1,
write, 1, &page, &vma);
if (ret <= 0)
break;
......
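The access_process_vm() fix passes tsk rather than current to get_user_pages(), so the lookup happens in the traced task's address space instead of the caller's. On most architectures ptrace word reads are serviced by access_process_vm(); a small user-space program that exercises that path is sketched below (the variable names and the 0x42424242 value are arbitrary).

#include <stdio.h>
#include <signal.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static long secret = 0x42424242;

int main(void)
{
	pid_t child = fork();
	long word;

	if (child == 0) {
		/* Child: let the parent trace us, then stop. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(child, NULL, 0);

	/* Each PEEKDATA word read is serviced in the kernel against the
	 * child's mm, which is why get_user_pages() must take tsk. */
	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, child, &secret, NULL);
	if (word == -1 && errno)
		perror("ptrace");
	else
		printf("read 0x%lx from child\n", word);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}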
......@@ -1678,7 +1678,7 @@ asmlinkage void schedule(void)
queue = array->queue + idx;
next = list_entry(queue->next, task_t, run_list);
if (next->activated > 0) {
if (!rt_task(next) && next->activated > 0) {
unsigned long long delta = now - next->timestamp;
if (next->activated == 1)
......
......@@ -1279,6 +1279,9 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
cachep->dtor = dtor;
cachep->name = name;
	/* Don't let CPUs come and go */
lock_cpu_hotplug();
if (g_cpucache_up == FULL) {
enable_cpucache(cachep);
} else {
......@@ -1328,6 +1331,7 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
if (!strcmp(pc->name,name)) {
printk("kmem_cache_create: duplicate cache %s\n",name);
up(&cache_chain_sem);
unlock_cpu_hotplug();
BUG();
}
}
......@@ -1337,6 +1341,7 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
up(&cache_chain_sem);
unlock_cpu_hotplug();
opps:
return cachep;
}
......@@ -1487,6 +1492,9 @@ int kmem_cache_destroy (kmem_cache_t * cachep)
if (!cachep || in_interrupt())
BUG();
	/* Don't let CPUs come and go */
lock_cpu_hotplug();
/* Find the cache in the chain of caches. */
down(&cache_chain_sem);
/*
......@@ -1500,6 +1508,7 @@ int kmem_cache_destroy (kmem_cache_t * cachep)
down(&cache_chain_sem);
list_add(&cachep->next,&cache_chain);
up(&cache_chain_sem);
unlock_cpu_hotplug();
return 1;
}
......@@ -1514,6 +1523,8 @@ int kmem_cache_destroy (kmem_cache_t * cachep)
cachep->lists.shared = NULL;
kmem_cache_free(&cache_cache, cachep);
unlock_cpu_hotplug();
return 0;
}
......
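kmem_cache_create() and kmem_cache_destroy() now bracket their work with lock_cpu_hotplug()/unlock_cpu_hotplug() so CPUs cannot come online or go offline while per-CPU array caches are being set up or torn down. A minimal sketch of that bracketing pattern, assuming the hotplug lock helpers live in <linux/cpu.h> as in this era's tree; my_cache_op() is hypothetical.

#include <linux/cpu.h>	/* lock_cpu_hotplug(), unlock_cpu_hotplug() */
#include <linux/slab.h>

static void my_cache_op(kmem_cache_t *cachep)
{
	/* Pin the set of online CPUs: per-CPU array caches must not
	 * appear or disappear while we touch them. */
	lock_cpu_hotplug();

	/* ... set up or tear down per-CPU state for cachep here ... */

	unlock_cpu_hotplug();
}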
......@@ -1167,7 +1167,7 @@ static int swap_show(struct seq_file *swap, void *v)
file = ptr->swap_file;
len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
seq_printf(swap, "%*s %s\t%d\t%ld\t%d\n",
seq_printf(swap, "%*s%s\t%d\t%ld\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_dentry->d_inode->i_mode) ?
"partition" : "file\t",
......
......@@ -1462,7 +1462,9 @@ static int cm_open_mixdev(struct inode *inode, struct file *file)
static int cm_release_mixdev(struct inode *inode, struct file *file)
{
struct cm_state *s = (struct cm_state *)file->private_data;
struct cm_state *s;
s = file->private_data;
VALIDATE_STATE(s);
return 0;
......
......@@ -160,7 +160,6 @@ typedef struct {
} opl3sa2_state_t;
static opl3sa2_state_t opl3sa2_state[OPL3SA2_CARDS_MAX];
static spinlock_t opl3sa2_lock = SPIN_LOCK_UNLOCKED;
/* Our parameters */
......@@ -853,6 +852,8 @@ static struct pnp_driver opl3sa2_driver = {
/* End of component functions */
#ifdef CONFIG_PM
static spinlock_t opl3sa2_lock = SPIN_LOCK_UNLOCKED;
/* Power Management support functions */
static int opl3sa2_suspend(struct pm_dev *pdev, unsigned int pm_mode)
{
......