Commit 8316120b authored by David S. Miller

Merge davem@nuts.ninka.net:/home/davem/src/BK/net-2.5

into kernel.bkbits.net:/home/davem/net-2.5
parents ac1a6df6 29b1ac58
......@@ -460,6 +460,13 @@ M: henrique@cyclades.com
W: http://www.cyclades.com/
S: Supported
DAC960 RAID CONTROLLER DRIVER
P: Dave Olien
M: dmo@osdl.org
W: http://www.osdl.org/archive/dmo/DAC960
L: linux-kernel@vger.kernel.org
S: Maintained
DAMA SLAVE for AX.25
P: Joerg Reuter
M: jreuter@yaina.de
......
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 59
SUBLEVEL = 60
EXTRAVERSION =
# *DOCUMENTATION*
......
......@@ -290,14 +290,7 @@ ecard_task(void * unused)
{
struct task_struct *tsk = current;
/*
* We don't want /any/ signals, not even SIGKILL
*/
sigfillset(&tsk->blocked);
sigemptyset(&tsk->pending.signal);
recalc_sigpending();
strcpy(tsk->comm, "kecardd");
daemonize();
daemonize("kecardd");
/*
* Allocate a mm. We're not a lazy-TLB kernel task since we need
......
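The hunk above is one instance of a tree-wide conversion in this merge: daemonize() now takes the thread name itself (printf-style, as the loop driver hunk further down shows with "loop%d") and detaches the thread in one call, so the manual strcpy(tsk->comm, ...) disappears and the signal setup shrinks. A minimal sketch of the new kernel-thread prologue, using a hypothetical worker thread:

#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical worker; only the prologue matters here. */
static int example_thread(void *unused)
{
	/* Names the thread and detaches it in one call. */
	daemonize("kexampled");

	/* Threads that must not be killed still block signals explicitly,
	 * as kapmd does in a later hunk. */
	sigfillset(&current->blocked);

	for (;;) {
		/* ... periodic work ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}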
......@@ -52,7 +52,7 @@ int using_apic_timer = 0;
int prof_multiplier[NR_CPUS] = { 1, };
int prof_old_multiplier[NR_CPUS] = { 1, };
int prof_counter[NR_CPUS] = { 1, };
DEFINE_PER_CPU(int, prof_counter) = 1;
int get_maxlvt(void)
{
......@@ -997,7 +997,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
x86_do_profile(regs);
if (--prof_counter[cpu] <= 0) {
if (--per_cpu(prof_counter, cpu) <= 0) {
/*
* The multiplier may have changed since the last time we got
* to this point as a result of the user writing to
......@@ -1006,10 +1006,12 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
*
* Interrupts are already masked off at this point.
*/
prof_counter[cpu] = prof_multiplier[cpu];
if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
__setup_APIC_LVTT(calibration_result/prof_counter[cpu]);
prof_old_multiplier[cpu] = prof_counter[cpu];
per_cpu(prof_counter, cpu) = prof_multiplier[cpu];
if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
__setup_APIC_LVTT(
calibration_result/
per_cpu(prof_counter, cpu));
prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
}
#ifdef CONFIG_SMP
......
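The apic.c hunk above is the per-CPU variable conversion pattern that recurs in the Voyager hunks below: an NR_CPUS-sized array becomes a DEFINE_PER_CPU variable, and every access goes through per_cpu(var, cpu). A small illustrative sketch with a hypothetical counter:

#include <linux/percpu.h>

/* Hypothetical per-CPU tick counter, initialised to 1 on every CPU. */
static DEFINE_PER_CPU(int, example_ticks) = 1;

static void example_local_tick(int cpu, int multiplier)
{
	/* Decrement this CPU's private copy; reload it when it hits zero. */
	if (--per_cpu(example_ticks, cpu) <= 0)
		per_cpu(example_ticks, cpu) = multiplier;
}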
......@@ -1742,9 +1742,8 @@ static int apm(void *unused)
kapmd_running = 1;
daemonize();
daemonize("kapmd");
strcpy(current->comm, "kapmd");
current->flags |= PF_IOTHREAD;
sigfillset(&current->blocked);
......
......@@ -510,9 +510,8 @@ int balanced_irq(void *unused)
int i;
unsigned long prev_balance_time = jiffies;
long time_remaining = balanced_irq_interval;
daemonize();
sigfillset(&current->blocked);
sprintf(current->comm, "kirqd");
daemonize("kirqd");
/* push everything to CPU 0 to give us a starting point. */
for (i = 0 ; i < NR_IRQS ; i++)
......@@ -1440,7 +1439,8 @@ void disable_IO_APIC(void)
* by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
*/
static void __init setup_ioapic_ids_from_mpc (void)
#ifndef CONFIG_X86_NUMAQ
static void __init setup_ioapic_ids_from_mpc(void)
{
struct IO_APIC_reg_00 reg_00;
unsigned long phys_id_present_map;
......@@ -1533,6 +1533,9 @@ static void __init setup_ioapic_ids_from_mpc (void)
printk(" ok.\n");
}
}
#else
static void __init setup_ioapic_ids_from_mpc(void) { }
#endif
/*
* There is a nasty bug in some older SMP boards, their mptable lies
......
......@@ -63,8 +63,6 @@ extern void show_registers(struct pt_regs *regs);
CRU_ESCR0 (with any non-null event selector) through a complemented
max threshold. [IA32-Vol3, Section 14.9.9] */
#define MSR_P4_IQ_COUNTER0 0x30C
#define MSR_P4_IQ_CCCR0 0x36C
#define MSR_P4_CRU_ESCR0 0x3B8
#define P4_NMI_CRU_ESCR0 (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
#define P4_NMI_IQ_CCCR0 \
(P4_CCCR_OVF_PMI|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
......
......@@ -935,10 +935,6 @@ static void smp_tune_scheduling (void)
* Cycle through the processors sending APIC IPIs to boot each.
*/
extern int prof_multiplier[NR_CPUS];
extern int prof_old_multiplier[NR_CPUS];
extern int prof_counter[NR_CPUS];
static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
......@@ -949,17 +945,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
{
int apicid, cpu, bit;
/*
* Initialize the logical to physical CPU number mapping
* and the per-CPU profiling counter/multiplier
*/
for (cpu = 0; cpu < NR_CPUS; cpu++) {
prof_counter[cpu] = 1;
prof_old_multiplier[cpu] = 1;
prof_multiplier[cpu] = 1;
}
/*
* Setup boot CPU information
*/
......
......@@ -236,7 +236,7 @@ static __u32 trampoline_base;
/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static unsigned int prof_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
static unsigned int prof_old_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
static unsigned int prof_counter[NR_CPUS] __cacheline_aligned = { 1, };
static DEFINE_PER_CPU(unsigned int, prof_counter) = 1;
/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;
......@@ -393,9 +393,6 @@ find_smp_config(void)
/* initialize the CPU structures (moved from smp_boot_cpus) */
for(i=0; i<NR_CPUS; i++) {
prof_counter[i] = 1;
prof_old_multiplier[i] = 1;
prof_multiplier[i] = 1;
cpu_irq_affinity[i] = ~0;
}
cpu_online_map = (1<<boot_cpu_id);
......@@ -1312,7 +1309,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
x86_do_profile(regs);
if (--prof_counter[cpu] <= 0) {
if (--per_cpu(prof_counter, cpu) <= 0) {
/*
* The multiplier may have changed since the last time we got
* to this point as a result of the user writing to
......@@ -1321,10 +1318,10 @@ smp_local_timer_interrupt(struct pt_regs * regs)
*
* Interrupts are already masked off at this point.
*/
prof_counter[cpu] = prof_multiplier[cpu];
if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
per_cpu(prof_counter,cpu) = prof_multiplier[cpu];
if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
/* FIXME: need to update the vic timer tick here */
prof_old_multiplier[cpu] = prof_counter[cpu];
prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
}
update_process_times(user_mode(regs));
......
......@@ -137,13 +137,12 @@ thread(void *unused)
kvoyagerd_running = 1;
reparent_to_init();
daemonize();
daemonize(THREAD_NAME);
set_timeout = 0;
init_timer(&wakeup_timer);
strcpy(current->comm, THREAD_NAME);
sigfillset(&current->blocked);
current->tty = NULL; /* get rid of controlling tty */
......
......@@ -88,6 +88,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
set_pte(page_table, entry);
}
/*
* This function checks for proper alignment of input addr and len parameters.
*/
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
......
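The same is_aligned_hugepage_range() helper is added to several other architectures in later hunks of this merge; callers can then reject an unaligned request up front. A hedged sketch of such a caller, which is hypothetical here:

/* Illustrative caller only; the real call sites live in generic mm code. */
static int example_check_huge_request(unsigned long addr, unsigned long len)
{
	int err;

	/* Both the start address and the length must be multiples of HPAGE_SIZE. */
	err = is_aligned_hugepage_range(addr, len);
	if (err)
		return err;		/* -EINVAL on misalignment */

	/* ... safe to build or tear down the huge-page mapping ... */
	return 0;
}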
......@@ -7,4 +7,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofile-y := $(DRIVER_OBJS) init.o timer_int.o
oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \
op_model_ppro.o
op_model_ppro.o op_model_p4.o
......@@ -16,14 +16,14 @@
* code unlike the NMI-based code.
*/
extern int nmi_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
extern void timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
extern int nmi_init(struct oprofile_operations ** ops);
extern void timer_init(struct oprofile_operations ** ops);
int __init oprofile_arch_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
int __init oprofile_arch_init(struct oprofile_operations ** ops)
{
#ifdef CONFIG_X86_LOCAL_APIC
if (!nmi_init(ops, cpu))
if (!nmi_init(ops))
#endif
timer_init(ops, cpu);
timer_init(ops);
return 0;
}
......@@ -214,12 +214,62 @@ struct oprofile_operations nmi_ops = {
.stop = nmi_stop
};
#if !defined(CONFIG_X86_64)
static int __init p4_init(void)
{
__u8 cpu_model = current_cpu_data.x86_model;
if (cpu_model > 3)
return 0;
#ifndef CONFIG_SMP
nmi_ops.cpu_type = "i386/p4";
model = &op_p4_spec;
return 1;
#else
switch (smp_num_siblings) {
case 1:
nmi_ops.cpu_type = "i386/p4";
model = &op_p4_spec;
return 1;
case 2:
nmi_ops.cpu_type = "i386/p4-ht";
model = &op_p4_ht2_spec;
return 1;
}
#endif
printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
return 0;
}
static int __init ppro_init(void)
{
__u8 cpu_model = current_cpu_data.x86_model;
if (cpu_model > 5) {
nmi_ops.cpu_type = "i386/piii";
} else if (cpu_model > 2) {
nmi_ops.cpu_type = "i386/pii";
} else {
nmi_ops.cpu_type = "i386/ppro";
}
model = &op_ppro_spec;
return 1;
}
#endif /* !CONFIG_X86_64 */
int __init nmi_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
int __init nmi_init(struct oprofile_operations ** ops)
{
__u8 vendor = current_cpu_data.x86_vendor;
__u8 family = current_cpu_data.x86;
__u8 cpu_model = current_cpu_data.x86_model;
if (!cpu_has_apic)
return 0;
......@@ -230,26 +280,29 @@ int __init nmi_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
if (family < 6)
return 0;
model = &op_athlon_spec;
*cpu = OPROFILE_CPU_ATHLON;
nmi_ops.cpu_type = "i386/athlon";
break;
#ifndef CONFIG_X86_64
#if !defined(CONFIG_X86_64)
case X86_VENDOR_INTEL:
/* Less than a P6-class processor */
if (family != 6)
return 0;
if (cpu_model > 5) {
*cpu = OPROFILE_CPU_PIII;
} else if (cpu_model > 2) {
*cpu = OPROFILE_CPU_PII;
} else {
*cpu = OPROFILE_CPU_PPRO;
switch (family) {
/* Pentium IV */
case 0xf:
if (!p4_init())
return 0;
break;
/* A P6-class processor */
case 6:
if (!ppro_init())
return 0;
break;
default:
return 0;
}
model = &op_ppro_spec;
break;
#endif
#endif /* !CONFIG_X86_64 */
default:
return 0;
......
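Summarising the Intel branch above: Pentium 4 (family 0xf) goes through the new p4_init(), which also selects between the plain and HyperThreaded event specs from smp_num_siblings, while family 6 keeps the old PPro/PII/PIII naming via ppro_init(); anything else falls back to timer mode. A condensed restatement of that dispatch (same names and values as the hunk, nothing new):

/* Inside nmi_init(), X86_VENDOR_INTEL case -- condensed from the hunk above. */
switch (family) {
case 0xf:			/* Pentium 4 */
	if (!p4_init())		/* >2 HT siblings: revert to timer mode */
		return 0;
	break;
case 6:				/* P6 core: ppro_init() picks ppro/pii/piii by model */
	if (!ppro_init())
		return 0;
	break;
default:
	return 0;
}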
......@@ -10,7 +10,7 @@
#ifndef OP_COUNTER_H
#define OP_COUNTER_H
#define OP_MAX_COUNTER 4
#define OP_MAX_COUNTER 8
/* Per-perfctr configuration as set via
* oprofilefs.
......
......@@ -96,10 +96,13 @@ static int athlon_check_ctrs(unsigned int const cpu,
{
unsigned int low, high;
int i;
unsigned long eip = instruction_pointer(regs);
int is_kernel = !user_mode(regs);
for (i = 0 ; i < NUM_COUNTERS; ++i) {
CTR_READ(low, high, msrs, i);
if (CTR_OVERFLOWED(low)) {
oprofile_add_sample(instruction_pointer(regs), i, cpu);
oprofile_add_sample(eip, is_kernel, i, cpu);
CTR_WRITE(reset_value[i], msrs, i);
return 1;
}
......
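oprofile_add_sample() gains an is_kernel argument in this series, so every sampling path (the NMI counter handlers here, the per-arch timer notifiers later in the diff) now derives both values from the trapped pt_regs the same way. The shape of the new call; 'counter' and 'cpu' stand for whatever the calling driver tracks:

unsigned long eip = instruction_pointer(regs);
int is_kernel = !user_mode(regs);

oprofile_add_sample(eip, is_kernel, counter, cpu);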
This diff is collapsed.
......@@ -90,11 +90,13 @@ static int ppro_check_ctrs(unsigned int const cpu,
{
unsigned int low, high;
int i;
unsigned long eip = instruction_pointer(regs);
int is_kernel = !user_mode(regs);
for (i = 0 ; i < NUM_COUNTERS; ++i) {
CTR_READ(low, high, msrs, i);
if (CTR_OVERFLOWED(low)) {
oprofile_add_sample(instruction_pointer(regs), i, cpu);
oprofile_add_sample(eip, is_kernel, i, cpu);
CTR_WRITE(reset_value[i], msrs, i);
return 1;
}
......
......@@ -11,8 +11,8 @@
#ifndef OP_X86_MODEL_H
#define OP_X86_MODEL_H
/* will need re-working for Pentium IV */
#define MAX_MSR 4
/* Pentium IV needs all these */
#define MAX_MSR 63
struct op_saved_msr {
unsigned int high;
......@@ -47,6 +47,8 @@ struct op_x86_model_spec {
};
extern struct op_x86_model_spec const op_ppro_spec;
extern struct op_x86_model_spec const op_p4_spec;
extern struct op_x86_model_spec const op_p4_ht2_spec;
extern struct op_x86_model_spec const op_athlon_spec;
#endif /* OP_X86_MODEL_H */
......@@ -20,8 +20,9 @@ static int timer_notify(struct notifier_block * self, unsigned long val, void *
{
struct pt_regs * regs = (struct pt_regs *)data;
int cpu = smp_processor_id();
unsigned long eip = instruction_pointer(regs);
oprofile_add_sample(instruction_pointer(regs), 0, cpu);
oprofile_add_sample(eip, !user_mode(regs), 0, cpu);
return 0;
}
......@@ -45,13 +46,13 @@ static void timer_stop(void)
static struct oprofile_operations timer_ops = {
.start = timer_start,
.stop = timer_stop
.stop = timer_stop,
.cpu_type = "timer"
};
void __init timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
void __init timer_init(struct oprofile_operations ** ops)
{
*ops = &timer_ops;
*cpu = OPROFILE_CPU_TIMER;
printk(KERN_INFO "oprofile: using timer interrupt.\n");
}
......@@ -96,6 +96,18 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
return;
}
/*
* This function checks for proper alignment of input addr and len parameters.
*/
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
......
......@@ -11,10 +11,10 @@
#include <linux/oprofile.h>
#include <linux/init.h>
extern void timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
extern void timer_init(struct oprofile_operations ** ops);
int __init oprofile_arch_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
int __init oprofile_arch_init(struct oprofile_operations ** ops)
{
timer_init(ops, cpu);
timer_init(ops);
return 0;
}
......@@ -19,8 +19,10 @@ static int timer_notify(struct notifier_block * self, unsigned long val, void *
{
struct pt_regs * regs = (struct pt_regs *)data;
int cpu = smp_processor_id();
unsigned long pc = regs->iaoq[0];
int is_kernel = !user_mode(regs);
oprofile_add_sample(regs->iaoq[0], 0, cpu);
oprofile_add_sample(pc, is_kernel, 0, cpu);
return 0;
}
......@@ -44,13 +46,13 @@ static void timer_stop(void)
static struct oprofile_operations timer_ops = {
.start = timer_start,
.stop = timer_stop
.stop = timer_stop,
.cpu_type = "timer"
};
void __init timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
void __init timer_init(struct oprofile_operations ** ops)
{
*ops = &timer_ops;
*cpu = OPROFILE_CPU_TIMER;
printk(KERN_INFO "oprofile: using timer interrupt.\n");
}
......@@ -211,9 +211,7 @@ static int rtasd(void *unused)
DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2);
daemonize();
sigfillset(&current->blocked);
sprintf(current->comm, "rtasd");
daemonize("rtasd");
#if 0
/* Rusty unreal time task */
......
......@@ -11,10 +11,10 @@
#include <linux/oprofile.h>
#include <linux/init.h>
extern void timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
extern void timer_init(struct oprofile_operations ** ops);
int __init oprofile_arch_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
int __init oprofile_arch_init(struct oprofile_operations ** ops)
{
timer_init(ops, cpu);
timer_init(ops);
return 0;
}
......@@ -19,8 +19,10 @@ static int timer_notify(struct notifier_block * self, unsigned long val, void *
{
struct pt_regs * regs = (struct pt_regs *)data;
int cpu = smp_processor_id();
unsigned long pc = instruction_pointer(regs);
int is_kernel = !user_mode(regs);
oprofile_add_sample(instruction_pointer(regs), 0, cpu);
oprofile_add_sample(pc, is_kernel, 0, cpu);
return 0;
}
......@@ -44,13 +46,13 @@ static void timer_stop(void)
static struct oprofile_operations timer_ops = {
.start = timer_start,
.stop = timer_stop
.stop = timer_stop,
.cpu_type = "timer"
};
void __init timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
void __init timer_init(struct oprofile_operations ** ops)
{
*ops = &timer_ops;
*cpu = OPROFILE_CPU_TIMER;
printk(KERN_INFO "oprofile: using timer interrupt.\n");
}
......@@ -110,7 +110,7 @@ config QDIO
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called qdio.o. If you want to compile it as a
The module will be called qdio. If you want to compile it as a
module, say M here and read <file:Documentation/modules.txt>.
If unsure, say Y.
......@@ -210,7 +210,7 @@ config BINFMT_ELF
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/modules.txt>. The module
will be called binfmt_elf.o. Saying M or N here is dangerous because
will be called binfmt_elf. Saying M or N here is dangerous because
some crucial programs on your system might be in ELF format.
config BINFMT_MISC
......@@ -235,7 +235,7 @@ config BINFMT_MISC
use this part of the kernel.
You may say M here for module support and later load the module when
you have use for it; the module is called binfmt_misc.o. If you
you have use for it; the module is called binfmt_misc. If you
don't know what to answer at this point, say Y.
config PROCESS_DEBUG
......
......@@ -124,7 +124,7 @@ config QDIO
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called qdio.o. If you want to compile it as a
The module will be called qdio. If you want to compile it as a
module, say M here and read <file:Documentation/modules.txt>.
If unsure, say Y.
......@@ -224,7 +224,7 @@ config BINFMT_ELF
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/modules.txt>. The module
will be called binfmt_elf.o. Saying M or N here is dangerous because
will be called binfmt_elf. Saying M or N here is dangerous because
some crucial programs on your system might be in ELF format.
config BINFMT_MISC
......@@ -249,7 +249,7 @@ config BINFMT_MISC
use this part of the kernel.
You may say M here for module support and later load the module when
you have use for it; the module is called binfmt_misc.o. If you
you have use for it; the module is called binfmt_misc. If you
don't know what to answer at this point, say Y.
config PROCESS_DEBUG
......
......@@ -65,14 +65,11 @@ static int powerd(void *__unused)
static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
daemonize();
sprintf(current->comm, "powerd");
daemonize("powerd");
again:
while (button_pressed == 0) {
spin_lock_irq(&current->sighand->siglock);
flush_signals(current);
spin_unlock_irq(&current->sighand->siglock);
interruptible_sleep_on(&powerd_wait);
}
......
......@@ -232,6 +232,18 @@ make_hugetlb_pages_present(unsigned long addr, unsigned long end, int flags)
return -1;
}
/*
* This function checks for proper alignment of input addr and len parameters.
*/
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
......
......@@ -11,10 +11,10 @@
#include <linux/oprofile.h>
#include <linux/init.h>
extern void timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
extern void timer_init(struct oprofile_operations ** ops);
int __init oprofile_arch_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
int __init oprofile_arch_init(struct oprofile_operations ** ops)
{
timer_init(ops, cpu);
timer_init(ops);
return 0;
}
......@@ -19,8 +19,10 @@ static int timer_notify(struct notifier_block * self, unsigned long val, void *
{
struct pt_regs * regs = (struct pt_regs *)data;
int cpu = smp_processor_id();
unsigned long pc = instruction_pointer(regs);
int is_kernel = !user_mode(regs);
oprofile_add_sample(instruction_pointer(regs), 0, cpu);
oprofile_add_sample(pc, is_kernel, 0, cpu);
return 0;
}
......@@ -44,13 +46,13 @@ static void timer_stop(void)
static struct oprofile_operations timer_ops = {
.start = timer_start,
.stop = timer_stop
.stop = timer_stop,
.cpu_type = "timer"
};
void __init timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
void __init timer_init(struct oprofile_operations ** ops)
{
*ops = &timer_ops;
*cpu = OPROFILE_CPU_TIMER;
printk(KERN_INFO "oprofile: using timer interrupt.\n");
}
......@@ -86,6 +86,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
set_pte(page_table, entry);
}
/*
* This function checks for proper alignment of input addr and len parameters.
*/
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
int
copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
......
......@@ -1731,12 +1731,17 @@ static boolean DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber))
break;
LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber;
if (LogicalDeviceNumber > DAC960_MaxLogicalDrives)
panic("DAC960: Logical Drive Number %d not supported\n",
LogicalDeviceNumber);
if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize)
panic("DAC960: Logical Drive Block Size %d not supported\n",
NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) {
DAC960_Error("DAC960: Logical Drive Number %d not supported\n",
Controller, LogicalDeviceNumber);
break;
}
if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) {
DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n",
Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
LogicalDeviceNumber++;
continue;
}
PhysicalDevice.Controller = 0;
PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
......
......@@ -345,7 +345,7 @@ static int cciss_open(struct inode *inode, struct file *filep)
printk(KERN_DEBUG "cciss_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
#endif /* CCISS_DEBUG */
if (ctlr > MAX_CTLR || hba[ctlr] == NULL)
if (ctlr >= MAX_CTLR || hba[ctlr] == NULL)
return -ENXIO;
/*
* Root is allowed to open raw volume zero even if its not configured
......
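The cciss check above is the usual off-by-one: hba[] has MAX_CTLR slots indexed 0..MAX_CTLR-1, so an index equal to MAX_CTLR is already out of bounds and must be rejected, hence '>=' rather than '>'. A tiny sketch with a hypothetical table:

#define EXAMPLE_MAX	8
static void *example_table[EXAMPLE_MAX];	/* valid indices: 0 .. EXAMPLE_MAX - 1 */

static void *example_lookup(int idx)
{
	if (idx < 0 || idx >= EXAMPLE_MAX)	/* '>' alone would let idx == EXAMPLE_MAX through */
		return NULL;
	return example_table[idx];
}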
......@@ -74,7 +74,7 @@ void blk_unregister_region(dev_t dev, unsigned long range)
down_write(&block_subsys.rwsem);
for (s = &probes[index]; *s; s = &(*s)->next) {
struct blk_probe *p = *s;
if (p->dev == dev || p->range == range) {
if (p->dev == dev && p->range == range) {
*s = p->next;
kfree(p);
break;
......
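The genhd fix above tightens the probe-list match from "either field" to "both fields": an entry is unlinked only when its dev and its range both equal the pair being unregistered, so an unrelated probe that happens to share one value is no longer freed by mistake. A self-contained sketch of the same unlink-by-key pattern with a hypothetical node type:

#include <linux/types.h>
#include <linux/slab.h>

/* Hypothetical probe entry, mirroring the list walked above. */
struct example_probe {
	dev_t			dev;
	unsigned long		range;
	struct example_probe	*next;
};

static void example_unregister(struct example_probe **head,
			       dev_t dev, unsigned long range)
{
	struct example_probe **s;

	for (s = head; *s; s = &(*s)->next) {
		struct example_probe *p = *s;

		if (p->dev == dev && p->range == range) {	/* both keys must match */
			*s = p->next;				/* unlink */
			kfree(p);
			break;
		}
	}
}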
......@@ -677,7 +677,7 @@ static char *rq_flags[] = {
"REQ_SENSE",
"REQ_FAILED",
"REQ_QUIET",
"REQ_SPECIAL"
"REQ_SPECIAL",
"REQ_DRIVE_CMD",
"REQ_DRIVE_TASK",
"REQ_DRIVE_TASKFILE",
......
......@@ -577,18 +577,12 @@ static int loop_thread(void *data)
struct loop_device *lo = data;
struct bio *bio;
daemonize();
daemonize("loop%d", lo->lo_number);
sprintf(current->comm, "loop%d", lo->lo_number);
current->flags |= PF_IOTHREAD; /* loop can be used in an encrypted device
hence, it mustn't be stopped at all because it could
be indirectly used during suspension */
spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked);
flush_signals(current);
spin_unlock_irq(&current->sighand->siglock);
set_user_nice(current, -20);
lo->lo_state = Lo_bound;
......
......@@ -61,16 +61,6 @@ config AGP_VIA
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
config AGP_VIA_KT400
tristate "VIA KT400 chipset support"
depends on AGP3
help
This option gives you AGP support for the GLX component of the
XFree86 4.x on VIA KT400 AGP 3.0 chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
config AGP_AMD
tristate "AMD Irongate, 761, and 762 support"
depends on AGP
......@@ -141,6 +131,11 @@ config AGP_HP_ZX1
This option gives you AGP GART support for the HP ZX1 chipset
for IA64 processors.
config AGP_ALPHA_CORE
tristate
depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
default AGP
# Put AGP 3.0 entries below here.
config AGP_I7505
......
......@@ -10,7 +10,6 @@ obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_AGP_VIA) += via-agp.o
obj-$(CONFIG_AGP_VIA_KT400) += via-kt400.o
obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
......@@ -18,6 +17,7 @@ obj-$(CONFIG_AGP_SWORKS) += sworks-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_AMD_8151) += amd-k8-agp.o
obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
obj-$(CONFIG_AGP_I7x05) += i7x05-agp.o
......@@ -30,7 +30,7 @@
#include <asm/agp.h> /* for flush_agp_cache() */
extern struct agp_bridge_data agp_bridge;
extern struct agp_bridge_data *agp_bridge;
#define PFX "agpgart: "
......@@ -128,6 +128,7 @@ struct agp_bridge_data {
int num_aperture_sizes;
int capndx;
int cant_use_aperture;
struct vm_operations_struct *vm_ops;
/* Links to driver specific functions */
......@@ -165,20 +166,20 @@ struct agp_bridge_data {
#define MB(x) (KB (KB (x)))
#define GB(x) (MB (KB (x)))
#define CACHE_FLUSH agp_bridge.cache_flush
#define CACHE_FLUSH agp_bridge->cache_flush
#define A_SIZE_8(x) ((struct aper_size_info_8 *) x)
#define A_SIZE_16(x) ((struct aper_size_info_16 *) x)
#define A_SIZE_32(x) ((struct aper_size_info_32 *) x)
#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x)
#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x)
#define A_IDX8() (A_SIZE_8(agp_bridge.aperture_sizes) + i)
#define A_IDX16() (A_SIZE_16(agp_bridge.aperture_sizes) + i)
#define A_IDX32() (A_SIZE_32(agp_bridge.aperture_sizes) + i)
#define A_IDXLVL2() (A_SIZE_LVL2(agp_bridge.aperture_sizes) + i)
#define A_IDXFIX() (A_SIZE_FIX(agp_bridge.aperture_sizes) + i)
#define A_IDX8() (A_SIZE_8(agp_bridge->aperture_sizes) + i)
#define A_IDX16() (A_SIZE_16(agp_bridge->aperture_sizes) + i)
#define A_IDX32() (A_SIZE_32(agp_bridge->aperture_sizes) + i)
#define A_IDXLVL2() (A_SIZE_LVL2(agp_bridge->aperture_sizes) + i)
#define A_IDXFIX() (A_SIZE_FIX(agp_bridge->aperture_sizes) + i)
#define MAXKEY (4096 * 32)
#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page)
#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge->scratch_page)
/* intel register */
#define INTEL_APBASE 0x10
......@@ -366,7 +367,7 @@ struct agp_driver {
/* Generic routines. */
void agp_generic_agp_enable(u32 mode);
int agp_generic_agp_3_0_enable(u32 mode);
void agp_generic_agp_3_0_enable(u32 mode);
int agp_generic_create_gatt_table(void);
int agp_generic_free_gatt_table(void);
agp_memory *agp_create_memory(int scratch_pages);
......
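Everything from agp.h onwards is the mechanical half of a single change: the global agp_bridge stops being a struct and becomes a pointer (initialised to a static dummy in a later hunk of this merge), so every field access and helper macro switches from '.' to '->'. A minimal sketch of that shape with hypothetical names:

/* Hypothetical mirror of the agp_bridge conversion. */
struct example_bridge {
	int	type;
	void	(*cache_flush)(void);
};

/* Before: one global object, accessed as example_bridge.cache_flush(). */

/* After: a global pointer, parked on a static dummy until a driver fills it in. */
static struct example_bridge example_dummy = { .type = 0 };
static struct example_bridge *example_bridge = &example_dummy;

static void example_flush(void)
{
	if (example_bridge->cache_flush)
		example_bridge->cache_flush();
}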
......@@ -17,15 +17,15 @@ static int ali_fetch_size(void)
u32 temp;
struct aper_size_info_32 *values;
pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
temp &= ~(0xfffffff0);
values = A_SIZE_32(agp_bridge.aperture_sizes);
values = A_SIZE_32(agp_bridge->aperture_sizes);
for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
for (i = 0; i < agp_bridge->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge.previous_size =
agp_bridge.current_size = (void *) (values + i);
agp_bridge.aperture_size_idx = i;
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
......@@ -37,9 +37,9 @@ static void ali_tlbflush(agp_memory * mem)
{
u32 temp;
pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
// clear tag
pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
((temp & 0xfffffff0) | 0x00000001|0x00000002));
}
......@@ -48,15 +48,15 @@ static void ali_cleanup(void)
struct aper_size_info_32 *previous_size;
u32 temp;
previous_size = A_SIZE_32(agp_bridge.previous_size);
previous_size = A_SIZE_32(agp_bridge->previous_size);
pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
// clear tag
pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
((temp & 0xffffff00) | 0x00000001|0x00000002));
pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
((temp & 0x00000ff0) | previous_size->size_value));
}
......@@ -65,24 +65,24 @@ static int ali_configure(void)
u32 temp;
struct aper_size_info_32 *current_size;
current_size = A_SIZE_32(agp_bridge.current_size);
current_size = A_SIZE_32(agp_bridge->current_size);
/* aperture size and gatt addr */
pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000))
pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
| (current_size->size_value & 0xf));
pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp);
pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
/* tlb control */
pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
/* address to map to */
pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
pci_read_config_dword(agp_bridge->dev, ALI_APBASE, &temp);
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
#if 0
if (agp_bridge.type == ALI_M1541) {
if (agp_bridge->type == ALI_M1541) {
u32 nlvm_addr = 0;
switch (current_size->size_value) {
......@@ -101,15 +101,15 @@ static int ali_configure(void)
nlvm_addr--;
nlvm_addr&=0xfff00000;
nlvm_addr+= agp_bridge.gart_bus_addr;
nlvm_addr|=(agp_bridge.gart_bus_addr>>12);
nlvm_addr+= agp_bridge->gart_bus_addr;
nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
}
#endif
pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
temp &= 0xffffff7f; //enable TLB
pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp);
pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
return 0;
}
......@@ -118,23 +118,23 @@ static unsigned long ali_mask_memory(unsigned long addr, int type)
{
/* Memory type is ignored */
return addr | agp_bridge.masks[0].mask;
return addr | agp_bridge->masks[0].mask;
}
static void ali_cache_flush(void)
{
global_cache_flush();
if (agp_bridge.type == ALI_M1541) {
if (agp_bridge->type == ALI_M1541) {
int i, page_count;
u32 temp;
page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order;
page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
(agp_bridge.gatt_bus_addr + i)) |
(agp_bridge->gatt_bus_addr + i)) |
ALI_CACHE_FLUSH_EN));
}
}
......@@ -148,9 +148,9 @@ static void *ali_alloc_page(void)
if (adr == 0)
return 0;
if (agp_bridge.type == ALI_M1541) {
pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
if (agp_bridge->type == ALI_M1541) {
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
virt_to_phys(adr)) |
ALI_CACHE_FLUSH_EN ));
......@@ -167,9 +167,9 @@ static void ali_destroy_page(void * addr)
global_cache_flush();
if (agp_bridge.type == ALI_M1541) {
pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
if (agp_bridge->type == ALI_M1541) {
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
virt_to_phys(addr)) |
ALI_CACHE_FLUSH_EN));
......@@ -197,30 +197,30 @@ static struct aper_size_info_32 ali_generic_sizes[7] =
static int __init ali_generic_setup (struct pci_dev *pdev)
{
agp_bridge.masks = ali_generic_masks;
agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
agp_bridge.size_type = U32_APER_SIZE;
agp_bridge.num_aperture_sizes = 7;
agp_bridge.dev_private_data = NULL;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = ali_configure;
agp_bridge.fetch_size = ali_fetch_size;
agp_bridge.cleanup = ali_cleanup;
agp_bridge.tlb_flush = ali_tlbflush;
agp_bridge.mask_memory = ali_mask_memory;
agp_bridge.agp_enable = agp_generic_agp_enable;
agp_bridge.cache_flush = ali_cache_flush;
agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
agp_bridge.insert_memory = agp_generic_insert_memory;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = ali_alloc_page;
agp_bridge.agp_destroy_page = ali_destroy_page;
agp_bridge.suspend = agp_generic_suspend;
agp_bridge.resume = agp_generic_resume;
agp_bridge.cant_use_aperture = 0;
agp_bridge->masks = ali_generic_masks;
agp_bridge->aperture_sizes = (void *) ali_generic_sizes;
agp_bridge->size_type = U32_APER_SIZE;
agp_bridge->num_aperture_sizes = 7;
agp_bridge->dev_private_data = NULL;
agp_bridge->needs_scratch_page = FALSE;
agp_bridge->configure = ali_configure;
agp_bridge->fetch_size = ali_fetch_size;
agp_bridge->cleanup = ali_cleanup;
agp_bridge->tlb_flush = ali_tlbflush;
agp_bridge->mask_memory = ali_mask_memory;
agp_bridge->agp_enable = agp_generic_agp_enable;
agp_bridge->cache_flush = ali_cache_flush;
agp_bridge->create_gatt_table = agp_generic_create_gatt_table;
agp_bridge->free_gatt_table = agp_generic_free_gatt_table;
agp_bridge->insert_memory = agp_generic_insert_memory;
agp_bridge->remove_memory = agp_generic_remove_memory;
agp_bridge->alloc_by_type = agp_generic_alloc_by_type;
agp_bridge->free_by_type = agp_generic_free_by_type;
agp_bridge->agp_alloc_page = ali_alloc_page;
agp_bridge->agp_destroy_page = ali_destroy_page;
agp_bridge->suspend = agp_generic_suspend;
agp_bridge->resume = agp_generic_resume;
agp_bridge->cant_use_aperture = 0;
return 0;
}
......@@ -313,7 +313,7 @@ static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
printk (KERN_INFO PFX "Detected ALi %s chipset\n",
devs[j].chipset_name);
agp_bridge.type = devs[j].chipset;
agp_bridge->type = devs[j].chipset;
if (devs[j].chipset_setup != NULL)
return devs[j].chipset_setup(pdev);
......@@ -327,7 +327,7 @@ static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
if (agp_try_unsupported) {
printk(KERN_WARNING PFX "Trying generic ALi routines"
" for device id: %04x\n", pdev->device);
agp_bridge.type = ALI_GENERIC;
agp_bridge->type = ALI_GENERIC;
return ali_generic_setup(pdev);
}
......@@ -350,10 +350,10 @@ static int __init agp_ali_probe (struct pci_dev *dev, const struct pci_device_id
/* probe for known chipsets */
if (agp_lookup_host_bridge(dev) != -ENODEV) {
agp_bridge.dev = dev;
agp_bridge.capndx = cap_ptr;
agp_bridge->dev = dev;
agp_bridge->capndx = cap_ptr;
/* Fill in the mode register */
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
ali_agp_driver.dev = dev;
agp_register_driver(&ali_agp_driver);
return 0;
......@@ -387,7 +387,7 @@ static int __init agp_ali_init(void)
ret_val = pci_module_init(&agp_ali_pci_driver);
if (ret_val)
agp_bridge.type = NOT_SUPPORTED;
agp_bridge->type = NOT_SUPPORTED;
return ret_val;
}
......
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/machvec.h>
#include <asm/agp_backend.h>
#include "../../../arch/alpha/kernel/pci_impl.h"
#include "agp.h"
static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
unsigned long pa;
struct page *page;
dma_addr = address - vma->vm_start + agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL) return NULL; /* no translation */
/*
* Get the page, inc the use count, and return it
*/
page = virt_to_page(__va(pa));
get_page(page);
return page;
}
static struct aper_size_info_fixed alpha_core_agp_sizes[] =
{
{ 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
};
static struct gatt_mask alpha_core_agp_masks[] = {
{ .mask = 0, .type = 0 },
};
struct vm_operations_struct alpha_core_agp_vm_ops = {
.nopage = alpha_core_agp_vm_nopage,
};
static int alpha_core_agp_nop(void)
{
/* just return success */
return 0;
}
static int alpha_core_agp_fetch_size(void)
{
return alpha_core_agp_sizes[0].size;
}
static int alpha_core_agp_configure(void)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
agp_bridge->gart_bus_addr = agp->aperture.bus_base;
return 0;
}
static void alpha_core_agp_cleanup(void)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
agp->ops->cleanup(agp);
}
static void alpha_core_agp_tlbflush(agp_memory *mem)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
}
static unsigned long alpha_core_agp_mask_memory(unsigned long addr, int type)
{
/* Memory type is ignored */
return addr | agp_bridge->masks[0].mask;
}
static void alpha_core_agp_enable(u32 mode)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
agp->mode.lw = agp_collect_device_status(mode, agp->capability.lw);
agp->mode.bits.enable = 1;
agp->ops->configure(agp);
agp_device_command(agp->mode.lw, 0);
}
static int alpha_core_agp_insert_memory(agp_memory *mem, off_t pg_start,
int type)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
int num_entries, status;
void *temp;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
if ((pg_start + mem->page_count) > num_entries) return -EINVAL;
status = agp->ops->bind(agp, pg_start, mem);
mb();
agp_bridge->tlb_flush(mem);
return status;
}
static int alpha_core_agp_remove_memory(agp_memory *mem, off_t pg_start,
int type)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
int status;
status = agp->ops->unbind(agp, pg_start, mem);
agp_bridge->tlb_flush(mem);
return status;
}
static struct agp_driver alpha_core_agp_driver = {
.owner = THIS_MODULE,
};
int __init
alpha_core_agp_setup(void)
{
alpha_agp_info *agp = alpha_mv.agp_info();
struct aper_size_info_fixed *aper_size;
if (!agp) return -ENODEV;
if (agp->ops->setup(agp)) return -ENODEV;
/*
* Build the aperture size descriptor
*/
aper_size = alpha_core_agp_sizes;
if (!aper_size) return -ENOMEM;
aper_size->size = agp->aperture.size / (1024 * 1024);
aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
aper_size->page_order = ffs(aper_size->num_entries / 1024) - 1;
/*
* Build a fake pci_dev struct
*/
if (!(agp_bridge->dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL))) {
return -ENOMEM;
}
agp_bridge->dev->vendor = 0xffff;
agp_bridge->dev->device = 0xffff;
agp_bridge->dev->sysdata = agp->hose;
/*
* Fill in the rest of the agp_bridge struct
*/
agp_bridge->masks = alpha_core_agp_masks;
agp_bridge->aperture_sizes = aper_size;
agp_bridge->current_size = aper_size; /* only one entry */
agp_bridge->size_type = FIXED_APER_SIZE;
agp_bridge->num_aperture_sizes = 1;
agp_bridge->dev_private_data = agp;
agp_bridge->needs_scratch_page = FALSE;
agp_bridge->configure = alpha_core_agp_configure;
agp_bridge->fetch_size = alpha_core_agp_fetch_size;
agp_bridge->cleanup = alpha_core_agp_cleanup;
agp_bridge->tlb_flush = alpha_core_agp_tlbflush;
agp_bridge->mask_memory = alpha_core_agp_mask_memory;
agp_bridge->agp_enable = alpha_core_agp_enable;
agp_bridge->cache_flush = global_cache_flush;
agp_bridge->create_gatt_table = alpha_core_agp_nop;
agp_bridge->free_gatt_table = alpha_core_agp_nop;
agp_bridge->insert_memory = alpha_core_agp_insert_memory;
agp_bridge->remove_memory = alpha_core_agp_remove_memory;
agp_bridge->alloc_by_type = agp_generic_alloc_by_type;
agp_bridge->free_by_type = agp_generic_free_by_type;
agp_bridge->agp_alloc_page = agp_generic_alloc_page;
agp_bridge->agp_destroy_page = agp_generic_destroy_page;
agp_bridge->mode = agp->capability.lw;
agp_bridge->cant_use_aperture = 1;
agp_bridge->vm_ops = &alpha_core_agp_vm_ops;
alpha_core_agp_driver.dev = agp_bridge->dev;
agp_register_driver(&alpha_core_agp_driver);
printk(KERN_INFO "Detected AGP on hose %d\n", agp->hose->index);
return 0;
}
static int __init agp_alpha_core_init(void)
{
int ret_val = -ENODEV;
if (alpha_mv.agp_info) {
agp_bridge->type = ALPHA_CORE_AGP;
ret_val = alpha_core_agp_setup();
}
return ret_val;
}
static void __exit agp_alpha_core_cleanup(void)
{
agp_unregister_driver(&alpha_core_agp_driver);
/* no pci driver for core */
}
module_init(agp_alpha_core_init);
module_exit(agp_alpha_core_cleanup);
MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
MODULE_LICENSE("GPL and additional rights");
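The new Alpha core-AGP driver above is also why the bridge structure gains a vm_ops field and why agp_mmap() (in the frontend hunks further down) now prefers kerninfo.vm_ops, presumably filled from the bridge, over remap_page_range(): on hardware where the aperture cannot be mapped linearly (the driver sets cant_use_aperture = 1), each faulting page is translated on demand through the nopage handler instead. Condensed from the agp_mmap() change below, not a new API:

if (kerninfo.vm_ops) {
	/* Chipset supplies fault-time translation, e.g.
	 * alpha_core_agp_vm_nopage() above. */
	vma->vm_ops = kerninfo.vm_ops;
} else if (remap_page_range(vma, vma->vm_start,
			    kerninfo.aper_base + offset,
			    size, vma->vm_page_prot)) {
	goto out_again;
}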
This diff is collapsed.
......@@ -70,7 +70,7 @@ static int x86_64_insert_memory(agp_memory * mem, off_t pg_start, int type)
/* gatt table should be empty. */
while (j < (pg_start + mem->page_count)) {
if (!PGE_EMPTY(agp_bridge.gatt_table[j]))
if (!PGE_EMPTY(agp_bridge->gatt_table[j]))
return -EBUSY;
j++;
}
......@@ -81,7 +81,7 @@ static int x86_64_insert_memory(agp_memory * mem, off_t pg_start, int type)
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = agp_bridge.mask_memory(mem->memory[i], mem->type);
addr = agp_bridge->mask_memory(mem->memory[i], mem->type);
tmp = addr;
BUG_ON(tmp & 0xffffff0000000ffc);
......@@ -89,9 +89,9 @@ static int x86_64_insert_memory(agp_memory * mem, off_t pg_start, int type)
pte |=(tmp & 0x00000000fffff000);
pte |= 1<<1|1<<0;
agp_bridge.gatt_table[j] = pte;
agp_bridge->gatt_table[j] = pte;
}
agp_bridge.tlb_flush(mem);
agp_bridge->tlb_flush(mem);
return 0;
}
......@@ -134,12 +134,12 @@ static int amd_x86_64_fetch_size(void)
temp = (temp & 0xe);
values = A_SIZE_32(x86_64_aperture_sizes);
for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
for (i = 0; i < agp_bridge->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge.previous_size =
agp_bridge.current_size = (void *) (values + i);
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge.aperture_size_idx = i;
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
......@@ -225,14 +225,14 @@ static int amd_8151_configure(void)
int current_size;
int tmp, tmp2, i;
u64 aperbar;
unsigned long gatt_bus = virt_to_phys(agp_bridge.gatt_table_real);
unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
/* Configure AGP regs in each x86-64 host bridge. */
pci_for_each_dev(dev) {
if (dev->bus->number==0 &&
PCI_FUNC(dev->devfn)==3 &&
PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) {
agp_bridge.gart_bus_addr = amd_x86_64_configure(dev,gatt_bus);
agp_bridge->gart_bus_addr = amd_x86_64_configure(dev,gatt_bus);
hammer = dev;
/*
......@@ -248,7 +248,7 @@ static int amd_8151_configure(void)
/* Shadow x86-64 registers into 8151 registers. */
dev = agp_bridge.dev;
dev = agp_bridge->dev;
if (!dev)
return -ENODEV;
......@@ -315,7 +315,7 @@ static void amd_8151_cleanup(void)
static unsigned long amd_8151_mask_memory(unsigned long addr, int type)
{
return addr | agp_bridge.masks[0].mask;
return addr | agp_bridge->masks[0].mask;
}
......@@ -368,12 +368,12 @@ static void agp_x86_64_agp_enable(u32 mode)
}
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &command);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &command);
command = agp_collect_device_status(mode, command);
command |= 0x100;
pci_write_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_COMMAND, command);
pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_COMMAND, command);
agp_device_command(command, 1);
}
......@@ -381,30 +381,30 @@ static void agp_x86_64_agp_enable(u32 mode)
static int __init amd_8151_setup (struct pci_dev *pdev)
{
agp_bridge.masks = amd_8151_masks;
agp_bridge.aperture_sizes = (void *) amd_8151_sizes;
agp_bridge.size_type = U32_APER_SIZE;
agp_bridge.num_aperture_sizes = 7;
agp_bridge.dev_private_data = NULL;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = amd_8151_configure;
agp_bridge.fetch_size = amd_x86_64_fetch_size;
agp_bridge.cleanup = amd_8151_cleanup;
agp_bridge.tlb_flush = amd_x86_64_tlbflush;
agp_bridge.mask_memory = amd_8151_mask_memory;
agp_bridge.agp_enable = agp_x86_64_agp_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
agp_bridge.insert_memory = x86_64_insert_memory;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
agp_bridge.suspend = agp_generic_suspend;
agp_bridge.resume = agp_generic_resume;
agp_bridge.cant_use_aperture = 0;
agp_bridge->masks = amd_8151_masks;
agp_bridge->aperture_sizes = (void *) amd_8151_sizes;
agp_bridge->size_type = U32_APER_SIZE;
agp_bridge->num_aperture_sizes = 7;
agp_bridge->dev_private_data = NULL;
agp_bridge->needs_scratch_page = FALSE;
agp_bridge->configure = amd_8151_configure;
agp_bridge->fetch_size = amd_x86_64_fetch_size;
agp_bridge->cleanup = amd_8151_cleanup;
agp_bridge->tlb_flush = amd_x86_64_tlbflush;
agp_bridge->mask_memory = amd_8151_mask_memory;
agp_bridge->agp_enable = agp_x86_64_agp_enable;
agp_bridge->cache_flush = global_cache_flush;
agp_bridge->create_gatt_table = agp_generic_create_gatt_table;
agp_bridge->free_gatt_table = agp_generic_free_gatt_table;
agp_bridge->insert_memory = x86_64_insert_memory;
agp_bridge->remove_memory = agp_generic_remove_memory;
agp_bridge->alloc_by_type = agp_generic_alloc_by_type;
agp_bridge->free_by_type = agp_generic_free_by_type;
agp_bridge->agp_alloc_page = agp_generic_alloc_page;
agp_bridge->agp_destroy_page = agp_generic_destroy_page;
agp_bridge->suspend = agp_generic_suspend;
agp_bridge->resume = agp_generic_resume;
agp_bridge->cant_use_aperture = 0;
return 0;
}
......@@ -420,11 +420,11 @@ static int __init agp_amdk8_probe (struct pci_dev *dev, const struct pci_device_
if (cap_ptr == 0)
return -ENODEV;
agp_bridge.dev = dev;
agp_bridge.capndx = cap_ptr;
agp_bridge->dev = dev;
agp_bridge->capndx = cap_ptr;
/* Fill in the mode register */
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
amd_8151_setup(dev);
amd_k8_agp_driver.dev = dev;
agp_register_driver(&amd_k8_agp_driver);
......@@ -458,9 +458,9 @@ int __init agp_amdk8_init(void)
ret_val = pci_module_init(&agp_amdk8_pci_driver);
if (ret_val)
agp_bridge.type = NOT_SUPPORTED;
agp_bridge->type = NOT_SUPPORTED;
agp_bridge.type = AMD_8151;
agp_bridge->type = AMD_8151;
return ret_val;
}
......
......@@ -44,26 +44,27 @@
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 100
struct agp_bridge_data agp_bridge = { .type = NOT_SUPPORTED };
struct agp_bridge_data agp_bridge_dummy = { .type = NOT_SUPPORTED };
struct agp_bridge_data *agp_bridge = &agp_bridge_dummy;
int agp_backend_acquire(void)
{
if (agp_bridge.type == NOT_SUPPORTED)
if (agp_bridge->type == NOT_SUPPORTED)
return -EINVAL;
if (atomic_read(&agp_bridge.agp_in_use) != 0)
if (atomic_read(&agp_bridge->agp_in_use) != 0)
return -EBUSY;
atomic_inc(&agp_bridge.agp_in_use);
atomic_inc(&agp_bridge->agp_in_use);
return 0;
}
void agp_backend_release(void)
{
if (agp_bridge.type == NOT_SUPPORTED)
if (agp_bridge->type == NOT_SUPPORTED)
return;
atomic_dec(&agp_bridge.agp_in_use);
atomic_dec(&agp_bridge->agp_in_use);
}
struct agp_max_table {
......@@ -114,38 +115,38 @@ static int agp_backend_initialize(struct pci_dev *dev)
{
int size_value, rc, got_gatt=0, got_keylist=0;
agp_bridge.max_memory_agp = agp_find_max();
agp_bridge.version = &agp_current_version;
agp_bridge->max_memory_agp = agp_find_max();
agp_bridge->version = &agp_current_version;
if (agp_bridge.needs_scratch_page == TRUE) {
if (agp_bridge->needs_scratch_page == TRUE) {
void *addr;
addr = agp_bridge.agp_alloc_page();
addr = agp_bridge->agp_alloc_page();
if (addr == NULL) {
printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
return -ENOMEM;
}
agp_bridge.scratch_page_real = virt_to_phys(addr);
agp_bridge.scratch_page =
agp_bridge.mask_memory(agp_bridge.scratch_page_real, 0);
agp_bridge->scratch_page_real = virt_to_phys(addr);
agp_bridge->scratch_page =
agp_bridge->mask_memory(agp_bridge->scratch_page_real, 0);
}
size_value = agp_bridge.fetch_size();
size_value = agp_bridge->fetch_size();
if (size_value == 0) {
printk(KERN_ERR PFX "unable to determine aperture size.\n");
rc = -EINVAL;
goto err_out;
}
if (agp_bridge.create_gatt_table()) {
if (agp_bridge->create_gatt_table()) {
printk(KERN_ERR PFX "unable to get memory for graphics translation table.\n");
rc = -ENOMEM;
goto err_out;
}
got_gatt = 1;
agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
if (agp_bridge.key_list == NULL) {
agp_bridge->key_list = vmalloc(PAGE_SIZE * 4);
if (agp_bridge->key_list == NULL) {
printk(KERN_ERR PFX "error allocating memory for key lists.\n");
rc = -ENOMEM;
goto err_out;
......@@ -153,27 +154,27 @@ static int agp_backend_initialize(struct pci_dev *dev)
got_keylist = 1;
/* FIXME vmalloc'd memory not guaranteed contiguous */
memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
memset(agp_bridge->key_list, 0, PAGE_SIZE * 4);
if (agp_bridge.configure()) {
if (agp_bridge->configure()) {
printk(KERN_ERR PFX "error configuring host chipset.\n");
rc = -EINVAL;
goto err_out;
}
printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
size_value, agp_bridge.gart_bus_addr);
size_value, agp_bridge->gart_bus_addr);
return 0;
err_out:
if (agp_bridge.needs_scratch_page == TRUE) {
agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page_real));
if (agp_bridge->needs_scratch_page == TRUE) {
agp_bridge->agp_destroy_page(phys_to_virt(agp_bridge->scratch_page_real));
}
if (got_gatt)
agp_bridge.free_gatt_table();
agp_bridge->free_gatt_table();
if (got_keylist)
vfree(agp_bridge.key_list);
vfree(agp_bridge->key_list);
return rc;
}
......@@ -181,13 +182,16 @@ static int agp_backend_initialize(struct pci_dev *dev)
/* cannot be __exit because it could be called from __init code */
static void agp_backend_cleanup(void)
{
agp_bridge.cleanup();
agp_bridge.free_gatt_table();
vfree(agp_bridge.key_list);
if (agp_bridge.needs_scratch_page == TRUE) {
agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page_real));
}
if (agp_bridge->cleanup != NULL)
agp_bridge->cleanup();
if (agp_bridge->free_gatt_table != NULL)
agp_bridge->free_gatt_table();
if (agp_bridge->key_list)
vfree(agp_bridge->key_list);
if ((agp_bridge->agp_destroy_page!=NULL) &&
(agp_bridge->needs_scratch_page == TRUE))
agp_bridge->agp_destroy_page(phys_to_virt(agp_bridge->scratch_page_real));
}
static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data)
......@@ -195,9 +199,9 @@ static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data)
switch(rq)
{
case PM_SUSPEND:
return agp_bridge.suspend();
return agp_bridge->suspend();
case PM_RESUME:
agp_bridge.resume();
agp_bridge->resume();
return 0;
}
return 0;
......@@ -248,21 +252,25 @@ int agp_register_driver (struct agp_driver *drv)
/* FIXME: What to do with this? */
inter_module_register("drm_agp", THIS_MODULE, &drm_agp);
pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power);
pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge->dev), agp_power);
agp_count++;
return 0;
frontend_err:
agp_backend_cleanup();
err_out:
agp_bridge.type = NOT_SUPPORTED;
agp_bridge->type = NOT_SUPPORTED;
module_put(drv->owner);
drv->dev = NULL;
return ret_val;
}
int agp_unregister_driver(struct agp_driver *drv)
{
agp_bridge.type = NOT_SUPPORTED;
if (drv->dev==NULL)
return -ENODEV;
agp_bridge->type = NOT_SUPPORTED;
pm_unregister_all(agp_power);
agp_frontend_cleanup();
agp_backend_cleanup();
......@@ -282,8 +290,8 @@ int __init agp_init(void)
already_initialised = 1;
memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
agp_bridge.type = NOT_SUPPORTED;
memset(agp_bridge, 0, sizeof(struct agp_bridge_data));
agp_bridge->type = NOT_SUPPORTED;
printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Dave Jones\n",
AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
......
......@@ -97,7 +97,9 @@ static agp_segment_priv *agp_find_seg_in_client(const agp_client * client,
int size, pgprot_t page_prot)
{
agp_segment_priv *seg;
int num_segments, pg_start, pg_count, i;
int num_segments, i;
off_t pg_start;
size_t pg_count;
pg_start = offset / 4096;
pg_count = size / 4096;
......@@ -174,7 +176,7 @@ static int agp_create_segment(agp_client * client, agp_region * region)
agp_segment_priv **ret_seg;
agp_segment_priv *seg;
agp_segment *user_seg;
int i;
size_t i;
seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), GFP_KERNEL);
if (seg == NULL) {
......@@ -578,8 +580,7 @@ static int agp_remove_client(pid_t id)
static int agp_mmap(struct file *file, struct vm_area_struct *vma)
{
int size;
int current_size;
unsigned int size, current_size;
unsigned long offset;
agp_client *client;
agp_file_private *priv = (agp_file_private *) file->private_data;
......@@ -611,8 +612,11 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
goto out_inval;
if (remap_page_range(vma, vma->vm_start, (kerninfo.aper_base + offset),
size, vma->vm_page_prot)) {
if (kerninfo.vm_ops) {
vma->vm_ops = kerninfo.vm_ops;
} else if (remap_page_range(vma, vma->vm_start,
(kerninfo.aper_base + offset),
size, vma->vm_page_prot)) {
goto out_again;
}
AGP_UNLOCK();
......@@ -623,8 +627,11 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
if (size != current_size)
goto out_inval;
if (remap_page_range(vma, vma->vm_start, kerninfo.aper_base,
size, vma->vm_page_prot)) {
if (kerninfo.vm_ops) {
vma->vm_ops = kerninfo.vm_ops;
} else if (remap_page_range(vma, vma->vm_start,
kerninfo.aper_base,
size, vma->vm_page_prot)) {
goto out_again;
}
AGP_UNLOCK();
......
......@@ -77,7 +77,7 @@ static int agp_3_0_isochronous_node_enable(struct agp_3_0_dev *dev_list, unsigne
struct agp_3_0_dev *dev;
};
struct pci_dev *td = agp_bridge.dev, *dev;
struct pci_dev *td = agp_bridge->dev, *dev;
struct list_head *head = &dev_list->list, *pos;
struct agp_3_0_dev *cur;
struct isoch_data *master, target;
......@@ -117,8 +117,8 @@ static int agp_3_0_isochronous_node_enable(struct agp_3_0_dev *dev_list, unsigne
if((ret = agp_3_0_dev_list_sort(dev_list, ndevs)) != 0)
goto free_and_exit;
pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat);
pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus);
pci_read_config_dword(td, agp_bridge->capndx + 0x0c, &tnistat);
pci_read_config_dword(td, agp_bridge->capndx + 0x04, &tstatus);
/* Extract power-on defaults from the target */
target.maxbw = (tnistat >> 16) & 0xff;
......@@ -170,13 +170,13 @@ static int agp_3_0_isochronous_node_enable(struct agp_3_0_dev *dev_list, unsigne
* in the target's NISTAT register, so we need to do this now
* to get an accurate value for ISOCH_N later.
*/
pci_read_config_word(td, agp_bridge.capndx + 0x20, &tnicmd);
pci_read_config_word(td, agp_bridge->capndx + 0x20, &tnicmd);
tnicmd &= ~(0x3 << 6);
tnicmd |= target.y << 6;
pci_write_config_word(td, agp_bridge.capndx + 0x20, tnicmd);
pci_write_config_word(td, agp_bridge->capndx + 0x20, tnicmd);
/* Reread the target's ISOCH_N */
pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat);
pci_read_config_dword(td, agp_bridge->capndx + 0x0c, &tnistat);
target.n = (tnistat >> 8) & 0xff;
/* Calculate the minimum ISOCH_N needed by each master */
......@@ -296,7 +296,7 @@ static int agp_3_0_nonisochronous_node_enable(struct agp_3_0_dev *dev_list, unsi
u32 trq, mrq, rem;
unsigned int cdev = 0;
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x04, &tstatus);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + 0x04, &tstatus);
trq = (tstatus >> 24) & 0xff;
mrq = trq / ndevs;
......@@ -321,7 +321,7 @@ static int agp_3_0_nonisochronous_node_enable(struct agp_3_0_dev *dev_list, unsi
*/
static int agp_3_0_node_enable(u32 mode, u32 minor)
{
struct pci_dev *td = agp_bridge.dev, *dev;
struct pci_dev *td = agp_bridge->dev, *dev;
u8 bus_num, mcapndx;
u32 isoch, arqsz, cal_cycle, tmp, rate;
u32 tstatus, tcmd, mcmd, mstatus, ncapid;
......@@ -364,7 +364,7 @@ static int agp_3_0_node_enable(u32 mode, u32 minor)
}
/* Extract some power-on defaults from the target */
pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus);
pci_read_config_dword(td, agp_bridge->capndx + 0x04, &tstatus);
isoch = (tstatus >> 17) & 0x1;
arqsz = (tstatus >> 13) & 0x7;
cal_cycle = (tstatus >> 10) & 0x7;
......@@ -470,7 +470,7 @@ static int agp_3_0_node_enable(u32 mode, u32 minor)
* Also set the AGP_ENABLE bit, effectively 'turning on' the
* target (this has to be done _before_ turning on the masters).
*/
pci_read_config_dword(td, agp_bridge.capndx + 0x08, &tcmd);
pci_read_config_dword(td, agp_bridge->capndx + 0x08, &tcmd);
tcmd &= ~(0x7 << 10);
tcmd &= ~0x7;
......@@ -479,7 +479,7 @@ static int agp_3_0_node_enable(u32 mode, u32 minor)
tcmd |= 0x1 << 8;
tcmd |= rate;
pci_write_config_dword(td, agp_bridge.capndx + 0x08, tcmd);
pci_write_config_dword(td, agp_bridge->capndx + 0x08, tcmd);
/*
* Set the target's advertised arqsz value, the minimum supported
......@@ -525,11 +525,11 @@ static int agp_3_0_node_enable(u32 mode, u32 minor)
* (AGP 3.0 devices are required to operate as AGP 2.0 devices
* when not using 3.0 electricals.
*/
int agp_generic_agp_3_0_enable(u32 mode)
void agp_generic_agp_3_0_enable(u32 mode)
{
u32 ncapid, major, minor, agp_3_0;
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx, &ncapid);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx, &ncapid);
major = (ncapid >> 20) & 0xf;
minor = (ncapid >> 16) & 0xf;
......@@ -537,16 +537,13 @@ int agp_generic_agp_3_0_enable(u32 mode)
printk(KERN_INFO PFX "Found an AGP %d.%d compliant device.\n",major, minor);
if(major >= 3) {
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x4, &agp_3_0);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + 0x4, &agp_3_0);
/*
* Check to see if we are operating in 3.0 mode
*/
if((agp_3_0 >> 3) & 0x1) {
if((agp_3_0 >> 3) & 0x1)
agp_3_0_node_enable(mode, minor);
return TRUE;
}
}
return FALSE;
}
EXPORT_SYMBOL(agp_generic_agp_3_0_enable);
......
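Reviewer note: agp_generic_agp_3_0_enable() above also changes from returning TRUE/FALSE to void, so callers no longer branch on the result. The major/minor it reports is a plain bit-field decode of the capability ID dword; with a hypothetical ncapid value the shifts shown in the hunk give:

	u32 ncapid = 0x00300002;		/* hypothetical capability dword */
	u32 major  = (ncapid >> 20) & 0xf;	/* -> 3 */
	u32 minor  = (ncapid >> 16) & 0xf;	/* -> 0, i.e. "AGP 3.0" */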
......@@ -176,7 +176,7 @@ static int hp_zx1_fetch_size(void)
size = hp_private.gart_size / MB(1);
hp_zx1_sizes[0].size = size;
agp_bridge.current_size = (void *) &hp_zx1_sizes[0];
agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
return size;
}
......@@ -184,10 +184,10 @@ static int hp_zx1_configure(void)
{
struct _hp_private *hp = &hp_private;
agp_bridge.gart_bus_addr = hp->gart_base;
agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP);
pci_read_config_dword(agp_bridge.dev,
agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode);
agp_bridge->gart_bus_addr = hp->gart_base;
agp_bridge->capndx = pci_find_capability(agp_bridge->dev, PCI_CAP_ID_AGP);
pci_read_config_dword(agp_bridge->dev,
agp_bridge->capndx + PCI_AGP_STATUS, &agp_bridge->mode);
if (hp->io_pdir_owner) {
OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
......@@ -241,7 +241,7 @@ static int hp_zx1_create_gatt_table(void)
}
for (i = 0; i < hp->gatt_entries; i++) {
hp->gatt[i] = (unsigned long) agp_bridge.scratch_page;
hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
}
return 0;
......@@ -296,11 +296,11 @@ static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
for (k = 0;
k < hp->io_pages_per_kpage;
k++, j++, paddr += hp->io_page_size) {
hp->gatt[j] = agp_bridge.mask_memory(paddr, type);
hp->gatt[j] = agp_bridge->mask_memory(paddr, type);
}
}
agp_bridge.tlb_flush(mem);
agp_bridge->tlb_flush(mem);
return 0;
}
......@@ -316,10 +316,10 @@ static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
io_pg_start = hp->io_pages_per_kpage * pg_start;
io_pg_count = hp->io_pages_per_kpage * mem->page_count;
for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
hp->gatt[i] = agp_bridge.scratch_page;
hp->gatt[i] = agp_bridge->scratch_page;
}
agp_bridge.tlb_flush(mem);
agp_bridge->tlb_flush(mem);
return 0;
}
......@@ -330,39 +330,39 @@ static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused)))
{
agp_bridge.masks = hp_zx1_masks;
agp_bridge.dev_private_data = NULL;
agp_bridge.size_type = FIXED_APER_SIZE;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = hp_zx1_configure;
agp_bridge.fetch_size = hp_zx1_fetch_size;
agp_bridge.cleanup = hp_zx1_cleanup;
agp_bridge.tlb_flush = hp_zx1_tlbflush;
agp_bridge.mask_memory = hp_zx1_mask_memory;
agp_bridge.agp_enable = agp_generic_agp_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
agp_bridge.insert_memory = hp_zx1_insert_memory;
agp_bridge.remove_memory = hp_zx1_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
agp_bridge.cant_use_aperture = 1;
agp_bridge->masks = hp_zx1_masks;
agp_bridge->dev_private_data = NULL;
agp_bridge->size_type = FIXED_APER_SIZE;
agp_bridge->needs_scratch_page = FALSE;
agp_bridge->configure = hp_zx1_configure;
agp_bridge->fetch_size = hp_zx1_fetch_size;
agp_bridge->cleanup = hp_zx1_cleanup;
agp_bridge->tlb_flush = hp_zx1_tlbflush;
agp_bridge->mask_memory = hp_zx1_mask_memory;
agp_bridge->agp_enable = agp_generic_agp_enable;
agp_bridge->cache_flush = global_cache_flush;
agp_bridge->create_gatt_table = hp_zx1_create_gatt_table;
agp_bridge->free_gatt_table = hp_zx1_free_gatt_table;
agp_bridge->insert_memory = hp_zx1_insert_memory;
agp_bridge->remove_memory = hp_zx1_remove_memory;
agp_bridge->alloc_by_type = agp_generic_alloc_by_type;
agp_bridge->free_by_type = agp_generic_free_by_type;
agp_bridge->agp_alloc_page = agp_generic_alloc_page;
agp_bridge->agp_destroy_page = agp_generic_destroy_page;
agp_bridge->cant_use_aperture = 1;
return hp_zx1_ioc_init();
}
static int __init agp_find_supported_device(struct pci_dev *dev)
{
agp_bridge.dev = dev;
agp_bridge->dev = dev;
/* ZX1 LBAs can be either PCI or AGP bridges */
if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset at %s\n",
dev->slot_name);
agp_bridge.type = HP_ZX1;
agp_bridge.dev = dev;
agp_bridge->type = HP_ZX1;
agp_bridge->dev = dev;
return hp_zx1_setup(dev);
}
return -ENODEV;
......@@ -408,7 +408,7 @@ static int __init agp_hp_init(void)
ret_val = pci_module_init(&agp_hp_pci_driver);
if (ret_val)
agp_bridge.type = NOT_SUPPORTED;
agp_bridge->type = NOT_SUPPORTED;
return ret_val;
}
......
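Reviewer note: the hp_zx1 hunks are part of the same mechanical conversion running through this commit: agp_bridge is now reached through a pointer rather than as a global struct, so every hook assignment flips from '.' to '->'. A rough sketch of the shape this assumes (field list abbreviated, and example_setup() is illustrative only, not the driver's actual entry point):

struct agp_bridge_data {
	struct pci_dev *dev;
	int (*fetch_size)(void);
	int (*configure)(void);
	void (*tlb_flush)(agp_memory *mem);
	/* ... remaining hooks and fields elided ... */
};

extern struct agp_bridge_data *agp_bridge;	/* previously a plain struct */

static int __init example_setup(struct pci_dev *pdev)
{
	agp_bridge->dev = pdev;
	agp_bridge->fetch_size = hp_zx1_fetch_size;
	agp_bridge->configure = hp_zx1_configure;
	return 0;
}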
......@@ -13,16 +13,16 @@ static int intel_7505_fetch_size(void)
/*
* For AGP 3.0 APSIZE is now 16 bits
*/
pci_read_config_word (agp_bridge.dev, INTEL_I7505_APSIZE, &tmp);
pci_read_config_word (agp_bridge->dev, INTEL_I7505_APSIZE, &tmp);
tmp = (tmp & 0xfff);
values = A_SIZE_16(agp_bridge.aperture_sizes);
values = A_SIZE_16(agp_bridge->aperture_sizes);
for (i=0; i < agp_bridge.num_aperture_sizes; i++) {
for (i=0; i < agp_bridge->num_aperture_sizes; i++) {
if (tmp == values[i].size_value) {
agp_bridge.previous_size = agp_bridge.current_size =
agp_bridge->previous_size = agp_bridge->current_size =
(void *)(values + i);
agp_bridge.aperture_size_idx = i;
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
......@@ -33,18 +33,18 @@ static int intel_7505_fetch_size(void)
static void intel_7505_tlbflush(agp_memory *mem)
{
u32 temp;
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp & ~(1 << 7));
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp | (1 << 7));
pci_read_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, temp & ~(1 << 7));
pci_read_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, temp | (1 << 7));
}
static void intel_7505_cleanup(void)
{
aper_size_info_16 *previous_size;
previous_size = A_SIZE_16(agp_bridge.previous_size);
pci_write_config_byte(agp_bridge.dev, INTEL_I7505_APSIZE,
previous_size = A_SIZE_16(agp_bridge->previous_size);
pci_write_config_byte(agp_bridge->dev, INTEL_I7505_APSIZE,
previous_size->size_value);
}
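Reviewer note: intel_7505_tlbflush() above flushes the GART TLB by pulsing bit 7 of AGPCTRL low and then high again. The same read-modify-write idiom, factored into a hypothetical helper for clarity:

static void example_pulse_bit(struct pci_dev *dev, int reg, u32 bit)
{
	u32 temp;

	pci_read_config_dword(dev, reg, &temp);
	pci_write_config_dword(dev, reg, temp & ~bit);	/* drive the bit low ... */
	pci_read_config_dword(dev, reg, &temp);
	pci_write_config_dword(dev, reg, temp | bit);	/* ... then high again */
}

/* e.g. example_pulse_bit(agp_bridge->dev, INTEL_I7505_AGPCTRL, 1 << 7); */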
......@@ -54,25 +54,25 @@ static int intel_7505_configure(void)
u32 temp;
aper_size_info_16 *current_size;
current_size = A_SIZE_16(agp_bridge.current_size);
current_size = A_SIZE_16(agp_bridge->current_size);
/* aperture size */
pci_write_config_word(agp_bridge.dev, INTEL_I7505_APSIZE,
pci_write_config_word(agp_bridge->dev, INTEL_I7505_APSIZE,
current_size->size_value);
/* address to map to */
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_NAPBASELO, &temp);
agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
pci_read_config_dword(agp_bridge->dev, INTEL_I7505_NAPBASELO, &temp);
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
/* attbase */
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_ATTBASE,
agp_bridge.gatt_bus_addr);
pci_write_config_dword(agp_bridge->dev, INTEL_I7505_ATTBASE,
agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, 0x0000);
pci_write_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, 0x0000);
/* clear error registers */
pci_write_config_byte(agp_bridge.dev, INTEL_I7505_ERRSTS, 0xff);
pci_write_config_byte(agp_bridge->dev, INTEL_I7505_ERRSTS, 0xff);
return 0;
}
......@@ -95,30 +95,30 @@ static void i7505_setup (u32 mode)
static int __init intel_7505_setup (struct pci_dev *pdev)
{
agp_bridge.masks = intel_generic_masks;
agp_bridge.aperture_sizes = (void *) intel_7505_sizes;
agp_bridge.size_type = U16_APER_SIZE;
agp_bridge.num_aperture_sizes = 7;
agp_bridge.dev_private_data = NULL;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = intel_7505_configure;
agp_bridge.fetch_size = intel_7505_fetch_size;
agp_bridge.cleanup = intel_7505_cleanup;
agp_bridge.tlb_flush = intel_7505_tlbflush;
agp_bridge.mask_memory = intel_mask_memory;
agp_bridge.agp_enable = i7505_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
agp_bridge.insert_memory = agp_generic_insert_memory;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
agp_bridge.suspend = agp_generic_suspend;
agp_bridge.resume = agp_generic_resume;
agp_bridge.cant_use_aperture = 0;
agp_bridge->masks = intel_generic_masks;
agp_bridge->aperture_sizes = (void *) intel_7505_sizes;
agp_bridge->size_type = U16_APER_SIZE;
agp_bridge->num_aperture_sizes = 7;
agp_bridge->dev_private_data = NULL;
agp_bridge->needs_scratch_page = FALSE;
agp_bridge->configure = intel_7505_configure;
agp_bridge->fetch_size = intel_7505_fetch_size;
agp_bridge->cleanup = intel_7505_cleanup;
agp_bridge->tlb_flush = intel_7505_tlbflush;
agp_bridge->mask_memory = intel_mask_memory;
agp_bridge->agp_enable = i7505_enable;
agp_bridge->cache_flush = global_cache_flush;
agp_bridge->create_gatt_table = agp_generic_create_gatt_table;
agp_bridge->free_gatt_table = agp_generic_free_gatt_table;
agp_bridge->insert_memory = agp_generic_insert_memory;
agp_bridge->remove_memory = agp_generic_remove_memory;
agp_bridge->alloc_by_type = agp_generic_alloc_by_type;
agp_bridge->free_by_type = agp_generic_free_by_type;
agp_bridge->agp_alloc_page = agp_generic_alloc_page;
agp_bridge->agp_destroy_page = agp_generic_destroy_page;
agp_bridge->suspend = agp_generic_suspend;
agp_bridge->resume = agp_generic_resume;
agp_bridge->cant_use_aperture = 0;
return 0;
}
......@@ -149,7 +149,7 @@ static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
if (pdev->device == devs[j].device_id) {
printk (KERN_INFO PFX "Detected Intel %s chipset\n",
devs[j].chipset_name);
agp_bridge.type = devs[j].chipset;
agp_bridge->type = devs[j].chipset;
if (devs[j].chipset_setup != NULL)
return devs[j].chipset_setup(pdev);
......@@ -177,10 +177,10 @@ static int __init agp_i7x05_probe (struct pci_dev *dev, const struct pci_device_
return -ENODEV;
if (agp_lookup_host_bridge(dev) != -ENODEV) {
agp_bridge.dev = dev;
agp_bridge.capndx = cap_ptr;
agp_bridge->dev = dev;
agp_bridge->capndx = cap_ptr;
/* Fill in the mode register */
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
i7x05_agp_driver.dev = dev;
agp_register_driver(&i7x05_agp_driver);
return 0;
......@@ -215,7 +215,7 @@ int __init agp_i7x05_init(void)
ret_val = pci_module_init(&agp_i7x05_pci_driver);
if (ret_val)
agp_bridge.type = NOT_SUPPORTED;
agp_bridge->type = NOT_SUPPORTED;
return ret_val;
}
......