Commit bf1a985a authored by Richard Russon

Merge flatcap.org:/home/flatcap/backup/bk/linux-2.6
into flatcap.org:/home/flatcap/backup/bk/ntfs-2.6
parents 1ffa6844 0eaf89f2
......@@ -78,6 +78,7 @@ config PPC_PMAC
bool " Apple G5 based machines"
default y
select ADB_PMU
select U3_DART
config PPC
bool
......@@ -109,16 +110,10 @@ config PPC_SPLPAR
processors, that is, which share physical processors between
two or more partitions.
config PMAC_DART
bool "Enable DART/IOMMU on PowerMac (allow >2G of RAM)"
depends on PPC_PMAC
depends on EXPERIMENTAL
config U3_DART
bool
depends on PPC_MULTIPLATFORM
default n
help
Enabling DART makes it possible to boot a PowerMac G5 with more
than 2GB of memory. Note that the code is very new and untested
at this time, so it has to be considered experimental. Enabling
this might result in data loss.
config PPC_PMAC64
bool
......
......@@ -49,7 +49,7 @@ obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
pmac_time.o pmac_nvram.o pmac_low_i2c.o \
open_pic_u3.o
obj-$(CONFIG_PMAC_DART) += pmac_iommu.o
obj-$(CONFIG_U3_DART) += u3_iommu.o
ifdef CONFIG_SMP
obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o
......
......@@ -687,7 +687,7 @@ _GLOBAL(kernel_thread)
ld r30,-16(r1)
blr
#ifndef CONFIG_PPC_PSERIE /* hack hack hack */
#ifndef CONFIG_PPC_PSERIES /* hack hack hack */
#define ppc_rtas sys_ni_syscall
#endif
......
......@@ -439,7 +439,7 @@ void hpte_init_lpar(void)
ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
ppc_md.htpe_clear_all = pSeries_lpar_hptab_clear;
ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
htab_finish_init();
}
......@@ -29,6 +29,4 @@ extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
extern void pmac_nvram_init(void);
extern void pmac_iommu_alloc(void);
#endif /* __PMAC_H__ */
......@@ -664,9 +664,7 @@ void __init pmac_pcibios_fixup(void)
pci_fix_bus_sysdata();
#ifdef CONFIG_PMAC_DART
iommu_setup_pmac();
#endif /* CONFIG_PMAC_DART */
iommu_setup_u3();
}
......
......@@ -447,16 +447,6 @@ static int __init pmac_probe(int platform)
if (platform != PLATFORM_POWERMAC)
return 0;
#ifdef CONFIG_PMAC_DART
/*
* On U3, the DART (iommu) must be allocated now since it
* has an impact on htab_initialize (due to the large page it
* occupies having to be broken up so the DART itself is not
* part of the cacheable linear mapping
*/
pmac_iommu_alloc();
#endif /* CONFIG_PMAC_DART */
return 1;
}
......
......@@ -423,13 +423,6 @@ static void __init early_cmdline_parse(void)
else if (!strncmp(opt, RELOC("force"), 5))
RELOC(iommu_force_on) = 1;
}
#ifndef CONFIG_PMAC_DART
if (RELOC(of_platform) == PLATFORM_POWERMAC) {
RELOC(ppc64_iommu_off) = 1;
prom_printf("DART disabled on PowerMac !\n");
}
#endif
}
/*
......
......@@ -50,6 +50,7 @@
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
......@@ -405,6 +406,16 @@ void __init early_setup(unsigned long dt_ptr)
DBG("Found, Initializing memory management...\n");
#ifdef CONFIG_U3_DART
/*
* On U3, the DART (iommu) must be allocated now since it
* has an impact on htab_initialize (due to the large page it
* occupies having to be broken up so the DART itself is not
* part of the cacheable linear mapping
*/
alloc_u3_dart_table();
#endif /* CONFIG_U3_DART */
/*
* Initialize stab / SLB management
*/
......
/*
* arch/ppc64/kernel/pmac_iommu.c
* arch/ppc64/kernel/u3_iommu.c
*
* Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
*
......@@ -7,7 +7,7 @@
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
*
* Dynamic DMA mapping support, PowerMac G5 (DART)-specific parts.
* Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
*
*
* This program is free software; you can redistribute it and/or modify
......@@ -89,7 +89,7 @@ static unsigned int *dart;
/* Dummy val that entries are set to when unused */
static unsigned int dart_emptyval;
static struct iommu_table iommu_table_pmac;
static struct iommu_table iommu_table_u3;
static int dart_dirty;
#define DBG(...)
......@@ -141,9 +141,9 @@ static void dart_flush(struct iommu_table *tbl)
dart_dirty = 0;
}
static void dart_build_pmac(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction)
static void dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction)
{
unsigned int *dp;
unsigned int rpn;
......@@ -152,7 +152,7 @@ static void dart_build_pmac(struct iommu_table *tbl, long index,
dp = ((unsigned int*)tbl->it_base) + index;
/* On pmac, all memory is contiguous, so we can move this
/* On U3, all memory is contiguous, so we can move this
* out of the loop.
*/
while (npages--) {
......@@ -168,7 +168,7 @@ static void dart_build_pmac(struct iommu_table *tbl, long index,
}
static void dart_free_pmac(struct iommu_table *tbl, long index, long npages)
static void dart_free(struct iommu_table *tbl, long index, long npages)
{
unsigned int *dp;
......@@ -239,32 +239,32 @@ static int dart_init(struct device_node *dart_node)
/* Invalidate DART to get rid of possible stale TLBs */
dart_tlb_invalidate_all();
iommu_table_pmac.it_busno = 0;
iommu_table_u3.it_busno = 0;
/* Units of tce entries */
iommu_table_pmac.it_offset = 0;
iommu_table_u3.it_offset = 0;
/* Set the tce table size - measured in pages */
iommu_table_pmac.it_size = dart_tablesize >> PAGE_SHIFT;
iommu_table_u3.it_size = dart_tablesize >> PAGE_SHIFT;
/* Initialize the common IOMMU code */
iommu_table_pmac.it_base = (unsigned long)dart_vbase;
iommu_table_pmac.it_index = 0;
iommu_table_pmac.it_blocksize = 1;
iommu_table_pmac.it_entrysize = sizeof(u32);
iommu_init_table(&iommu_table_pmac);
iommu_table_u3.it_base = (unsigned long)dart_vbase;
iommu_table_u3.it_index = 0;
iommu_table_u3.it_blocksize = 1;
iommu_table_u3.it_entrysize = sizeof(u32);
iommu_init_table(&iommu_table_u3);
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
*/
set_bit(iommu_table_pmac.it_mapsize - 1, iommu_table_pmac.it_map);
set_bit(iommu_table_u3.it_mapsize - 1, iommu_table_u3.it_map);
printk(KERN_INFO "U3-DART IOMMU initialized\n");
printk(KERN_INFO "U3/CPC925 DART IOMMU initialized\n");
return 0;
}
void iommu_setup_pmac(void)
void iommu_setup_u3(void)
{
struct pci_dev *dev = NULL;
struct device_node *dn;
......@@ -275,8 +275,8 @@ void iommu_setup_pmac(void)
return;
/* Setup low level TCE operations for the core IOMMU code */
ppc_md.tce_build = dart_build_pmac;
ppc_md.tce_free = dart_free_pmac;
ppc_md.tce_build = dart_build;
ppc_md.tce_free = dart_free;
ppc_md.tce_flush = dart_flush;
/* Initialize the DART HW */
......@@ -296,11 +296,11 @@ void iommu_setup_pmac(void)
*/
struct device_node *dn = pci_device_to_OF_node(dev);
if (dn)
dn->iommu_table = &iommu_table_pmac;
dn->iommu_table = &iommu_table_u3;
}
}
void __init pmac_iommu_alloc(void)
void __init alloc_u3_dart_table(void)
{
/* Only reserve DART space if machine has more than 2GB of RAM
* or if requested with iommu=on on cmdline.
......
......@@ -71,9 +71,9 @@
*
*/
#ifdef CONFIG_PMAC_DART
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_PMAC_DART */
#endif /* CONFIG_U3_DART */
HTAB htab_data = {NULL, 0, 0, 0, 0};
......@@ -203,7 +203,7 @@ void __init htab_initialize(void)
DBG("creating mapping for region: %lx : %lx\n", base, size);
#ifdef CONFIG_PMAC_DART
#ifdef CONFIG_U3_DART
/* Do not map the DART space. Fortunately, it will be aligned
* in such a way that it will not cross two lmb regions and will
* fit within a single 16Mb page.
......@@ -223,7 +223,7 @@ void __init htab_initialize(void)
mode_rw, use_largepages);
continue;
}
#endif /* CONFIG_PMAC_DART */
#endif /* CONFIG_U3_DART */
create_pte_mapping(base, base + size, mode_rw, use_largepages);
}
DBG(" <- htab_initialize()\n");
......
......@@ -88,7 +88,7 @@ obj-$(CONFIG_FB_68328) += 68328fb.o cfbfillrect.o cfbcopyarea.o cfbim
obj-$(CONFIG_FB_GBE) += gbefb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o cfbfillrect.o cfbimgblt.o cfbcopyarea.o
obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_PXA) += pxafb.o cfbimgblt.o cfbcopyarea.o cfbfillrect0.o
obj-$(CONFIG_FB_PXA) += pxafb.o cfbimgblt.o cfbcopyarea.o cfbfillrect.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_VESA) += vesafb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
......
......@@ -2,7 +2,7 @@
* ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
*
* Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
* Copyright (C) 2001 Anton Altaparmakov <aia21@cantab.net>
* Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
* Documentation is available at http://linux-ntfs.sf.net/ldm
......@@ -517,9 +517,15 @@ static BOOL ldm_validate_vmdb (struct block_device *bdev, unsigned long base,
if (vm->vblk_offset != 512)
ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset);
/* FIXME: How should we handle this situation? */
if ((vm->vblk_size * vm->last_vblk_seq) != (toc->bitmap1_size << 9))
ldm_info ("VMDB and TOCBLOCK don't agree on the database size.");
/*
* The last_vblk_seq can be before the end of the vmdb, just make sure
* it is not out of bounds.
*/
if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) {
ldm_crit ("VMDB exceeds allowed size specified by TOCBLOCK. "
"Database is corrupt. Aborting.");
goto out;
}
result = TRUE;
out:
......
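For context on the relaxed check above: toc->bitmap1_size appears to be counted in 512-byte sectors, so the shift by 9 converts it to bytes before it is compared with the byte span of the VMDB (vblk_size * last_vblk_seq). A worked example of the same check with illustrative numbers, not taken from this commit:

/* Hypothetical sizes, for illustration only:
 *   vm->vblk_size      = 128   bytes per VBLK
 *   vm->last_vblk_seq  = 2048  highest VBLK sequence number
 *   toc->bitmap1_size  = 1024  sectors advertised by the TOCBLOCK
 *
 * VMDB span:     128 * 2048 = 262144 bytes
 * Allowed area:  1024 << 9  = 524288 bytes
 *
 * The old equality test would have logged a size mismatch here even though
 * the database fits; the new test only aborts when the VMDB overruns the
 * area the TOCBLOCK advertises.
 */
if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9))
        goto out;       /* ldm_crit() reports a corrupt database, as in the hunk above */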
......@@ -108,7 +108,7 @@ struct scatterlist;
/* Walks all buses and creates iommu tables */
extern void iommu_setup_pSeries(void);
extern void iommu_setup_pmac(void);
extern void iommu_setup_u3(void);
/* Creates table for an individual device node */
extern void iommu_devnode_init(struct device_node *dn);
......@@ -155,6 +155,8 @@ extern void tce_init_iSeries(void);
extern void pci_iommu_init(void);
extern void pci_dma_init_direct(void);
extern void alloc_u3_dart_table(void);
extern int ppc64_iommu_off;
#endif /* _ASM_IOMMU_H */
......@@ -58,7 +58,7 @@ struct machdep_calls {
int local);
/* special for kexec, to be called in real mode, linear mapping is
* destroyed as well */
void (*htpe_clear_all)(void);
void (*hpte_clear_all)(void);
void (*tce_build)(struct iommu_table * tbl,
long index,
......
......@@ -59,54 +59,7 @@ struct systemcfg {
#ifdef __KERNEL__
extern struct systemcfg *systemcfg;
#else
/* Processor Version Register (PVR) field extraction */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
/* Processor Version Numbers */
#define PV_NORTHSTAR 0x0033
#define PV_PULSAR 0x0034
#define PV_POWER4 0x0035
#define PV_ICESTAR 0x0036
#define PV_SSTAR 0x0037
#define PV_POWER4p 0x0038
#define PV_GPUL 0x0039
#define PV_POWER5 0x003a
#define PV_970FX 0x003c
#define PV_630 0x0040
#define PV_630p 0x0041
/* Platforms supported by PPC64 */
#define PLATFORM_PSERIES 0x0100
#define PLATFORM_PSERIES_LPAR 0x0101
#define PLATFORM_ISERIES_LPAR 0x0201
#define PLATFORM_POWERMAC 0x0400
/* Compatibility with drivers coming from PPC32 world */
#define _machine (systemcfg->platform)
#define _MACH_Pmac PLATFORM_POWERMAC
static inline volatile struct systemcfg *systemcfg_init(void)
{
int fd = open("/proc/ppc64/systemcfg", O_RDONLY);
volatile struct systemcfg *ret;
if (fd == -1)
return 0;
ret = mmap(0, sizeof(struct systemcfg), PROT_READ, MAP_SHARED, fd, 0);
close(fd);
if (!ret)
return 0;
if (ret->version.major != SYSTEMCFG_MAJOR || ret->version.minor < SYSTEMCFG_MINOR) {
munmap((void *)ret, sizeof(struct systemcfg));
return 0;
}
return ret;
}
#endif /* __KERNEL__ */
#endif
#endif /* __ASSEMBLY__ */
......
......@@ -156,6 +156,29 @@ do { \
__wait_event(wq, condition); \
} while (0)
#define __wait_event_timeout(wq, condition, ret) \
do { \
DEFINE_WAIT(__wait); \
\
for (;;) { \
prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
if (condition) \
break; \
ret = schedule_timeout(ret); \
if (!ret) \
break; \
} \
finish_wait(&wq, &__wait); \
} while (0)
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!(condition)) \
__wait_event_timeout(wq, condition, __ret); \
__ret; \
})
#define __wait_event_interruptible(wq, condition, ret) \
do { \
DEFINE_WAIT(__wait); \
......
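The hunk above adds wait_event_timeout()/__wait_event_timeout() to include/linux/wait.h. A minimal usage sketch, assuming a hypothetical driver with its own wait queue and completion flag (the demo_* names are illustrative and not part of this commit):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

/* Producer side: set the flag and wake any sleeper. */
static void demo_complete(void)
{
        demo_done = 1;
        wake_up(&demo_wq);
}

/* Consumer side: sleep uninterruptibly until demo_done is set or HZ jiffies
 * pass. In this version of the macro the result is the remaining jiffies when
 * the condition became true, and 0 when the full timeout elapsed. */
static int demo_wait(void)
{
        long remaining = wait_event_timeout(demo_wq, demo_done, HZ);

        return remaining ? 0 : -ETIMEDOUT;
}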