Commit 3b89d486 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/sparc-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 864cb43e 292670c7
PowerNow! and Cool'n'Quiet are AMD names for frequency
management capabilities in AMD processors. As the hardware
implementation changes in new generations of the processors,
there is a different cpu-freq driver for each generation.
Note that the drivers will not load on the "wrong" hardware, so it
is safe to try each driver in turn when in doubt as to which is the
correct driver.
Note that the functionality to change frequency (and voltage)
is not available in all processors. The drivers will refuse
to load on processors without this capability. The capability
is detected with the cpuid instruction.
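As an illustration of that detection step, here is a minimal user-space
sketch (an assumption for illustration only, not code taken from the
drivers) that queries CPUID extended function 0x80000007 and tests the
frequency/voltage control bits described in AMD publication 26094:

    #include <stdio.h>
    #include <cpuid.h>   /* GCC wrapper around the cpuid instruction */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Extended function 0x80000007 reports Advanced Power
             * Management features; __get_cpuid() returns 0 if this
             * leaf is not supported on the processor.
             */
            if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
                    printf("CPUID 0x80000007 not supported\n");
                    return 1;
            }

            /* Assumed layout, per publication 26094: EDX bit 1 is
             * frequency ID control, bit 2 is voltage ID control.
             */
            if ((edx & 0x6) == 0x6)
                    printf("frequency and voltage control reported\n");
            else
                    printf("no PowerNow! frequency/voltage control\n");

            return 0;
    }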
The drivers use BIOS supplied tables to obtain frequency and
voltage information appropriate for a particular platform.
Frequency transitions will be unavailable if the BIOS does
not supply these tables.
6th Generation: powernow-k6
7th Generation: powernow-k7: Athlon, Duron, Geode.
8th Generation: powernow-k8: Athlon, Athlon 64, Opteron, Sempron.
Documentation on this functionality in 8th generation processors is
available in the "BIOS and Kernel Developer's Guide", publication
26094, chapter 9, which can be downloaded from www.amd.com.
BIOS supplied data, for powernow-k7 and for powernow-k8, may be
from either the PSB table or from ACPI objects. The ACPI support
is only available if the kernel config sets CONFIG_ACPI_PROCESSOR.
The powernow-k8 driver will attempt to use ACPI if so configured,
and fall back to PST if that fails.
The powernow-k7 driver will try to use the PSB support first, and
fall back to ACPI if the PSB support fails. A module parameter,
acpi_force, is provided to force ACPI support to be used instead
of PSB support.
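For example, where powernow-k7 is built as a module, the PSB path can
be bypassed in favour of ACPI data with a command along these lines
(an illustrative invocation of the acpi_force parameter described
above, not a required step):

    modprobe powernow-k7 acpi_force=1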
...@@ -581,7 +581,7 @@ config LEDS_CPU ...@@ -581,7 +581,7 @@ config LEDS_CPU
config ALIGNMENT_TRAP config ALIGNMENT_TRAP
bool bool
depends on CPU_32 depends on CPU_32
default y default y if !ARCH_EBSA110
help help
ARM processors can not fetch/store information which is not ARM processors can not fetch/store information which is not
naturally aligned on the bus, i.e., a 4 byte fetch must start at an naturally aligned on the bus, i.e., a 4 byte fetch must start at an
......
...@@ -67,9 +67,9 @@ u8 __readb(void *addr) ...@@ -67,9 +67,9 @@ u8 __readb(void *addr)
u32 ret, a = __isamem_convert_addr(addr); u32 ret, a = __isamem_convert_addr(addr);
if ((int)addr & 1) if ((int)addr & 1)
ret = __raw_getl(a); ret = __raw_readl(a);
else else
ret = __raw_getb(a); ret = __raw_readb(a);
return ret; return ret;
} }
...@@ -80,7 +80,7 @@ u16 __readw(void *addr) ...@@ -80,7 +80,7 @@ u16 __readw(void *addr)
if ((int)addr & 1) if ((int)addr & 1)
BUG(); BUG();
return __raw_getw(a); return __raw_readw(a);
} }
u32 __readl(void *addr) u32 __readl(void *addr)
...@@ -90,8 +90,8 @@ u32 __readl(void *addr) ...@@ -90,8 +90,8 @@ u32 __readl(void *addr)
if ((int)addr & 3) if ((int)addr & 3)
BUG(); BUG();
ret = __raw_getw(a); ret = __raw_readw(a);
ret |= __raw_getw(a + 4) << 16; ret |= __raw_readw(a + 4) << 16;
return ret; return ret;
} }
...@@ -104,9 +104,9 @@ void __writeb(u8 val, void *addr) ...@@ -104,9 +104,9 @@ void __writeb(u8 val, void *addr)
u32 a = __isamem_convert_addr(addr); u32 a = __isamem_convert_addr(addr);
if ((int)addr & 1) if ((int)addr & 1)
__raw_putl(val, a); __raw_writel(val, a);
else else
__raw_putb(val, a); __raw_writeb(val, a);
} }
void __writew(u16 val, void *addr) void __writew(u16 val, void *addr)
...@@ -116,7 +116,7 @@ void __writew(u16 val, void *addr) ...@@ -116,7 +116,7 @@ void __writew(u16 val, void *addr)
if ((int)addr & 1) if ((int)addr & 1)
BUG(); BUG();
__raw_putw(val, a); __raw_writew(val, a);
} }
void __writel(u32 val, void *addr) void __writel(u32 val, void *addr)
...@@ -126,8 +126,8 @@ void __writel(u32 val, void *addr) ...@@ -126,8 +126,8 @@ void __writel(u32 val, void *addr)
if ((int)addr & 3) if ((int)addr & 3)
BUG(); BUG();
__raw_putw(val, a); __raw_writew(val, a);
__raw_putw(val >> 16, a + 4); __raw_writew(val >> 16, a + 4);
} }
EXPORT_SYMBOL(__writeb); EXPORT_SYMBOL(__writeb);
...@@ -147,7 +147,7 @@ u8 __inb(int port) ...@@ -147,7 +147,7 @@ u8 __inb(int port)
* The SuperIO registers use sane addressing techniques... * The SuperIO registers use sane addressing techniques...
*/ */
if (SUPERIO_PORT(port)) if (SUPERIO_PORT(port))
ret = __raw_getb(ISAIO_BASE + (port << 2)); ret = __raw_readb(ISAIO_BASE + (port << 2));
else { else {
u32 a = ISAIO_BASE + ((port & ~1) << 1); u32 a = ISAIO_BASE + ((port & ~1) << 1);
...@@ -155,9 +155,9 @@ u8 __inb(int port) ...@@ -155,9 +155,9 @@ u8 __inb(int port)
* Shame nothing else does * Shame nothing else does
*/ */
if (port & 1) if (port & 1)
ret = __raw_getl(a); ret = __raw_readl(a);
else else
ret = __raw_getb(a); ret = __raw_readb(a);
} }
return ret; return ret;
} }
...@@ -170,7 +170,7 @@ u16 __inw(int port) ...@@ -170,7 +170,7 @@ u16 __inw(int port)
* The SuperIO registers use sane addressing techniques... * The SuperIO registers use sane addressing techniques...
*/ */
if (SUPERIO_PORT(port)) if (SUPERIO_PORT(port))
ret = __raw_getw(ISAIO_BASE + (port << 2)); ret = __raw_readw(ISAIO_BASE + (port << 2));
else { else {
u32 a = ISAIO_BASE + ((port & ~1) << 1); u32 a = ISAIO_BASE + ((port & ~1) << 1);
...@@ -180,7 +180,7 @@ u16 __inw(int port) ...@@ -180,7 +180,7 @@ u16 __inw(int port)
if (port & 1) if (port & 1)
BUG(); BUG();
ret = __raw_getw(a); ret = __raw_readw(a);
} }
return ret; return ret;
} }
...@@ -201,7 +201,7 @@ void __outb(u8 val, int port) ...@@ -201,7 +201,7 @@ void __outb(u8 val, int port)
* The SuperIO registers use sane addressing techniques... * The SuperIO registers use sane addressing techniques...
*/ */
if (SUPERIO_PORT(port)) if (SUPERIO_PORT(port))
__raw_putb(val, ISAIO_BASE + (port << 2)); __raw_writeb(val, ISAIO_BASE + (port << 2));
else { else {
u32 a = ISAIO_BASE + ((port & ~1) << 1); u32 a = ISAIO_BASE + ((port & ~1) << 1);
...@@ -209,9 +209,9 @@ void __outb(u8 val, int port) ...@@ -209,9 +209,9 @@ void __outb(u8 val, int port)
* Shame nothing else does * Shame nothing else does
*/ */
if (port & 1) if (port & 1)
__raw_putl(val, a); __raw_writel(val, a);
else else
__raw_putb(val, a); __raw_writeb(val, a);
} }
} }
...@@ -230,7 +230,7 @@ void __outw(u16 val, int port) ...@@ -230,7 +230,7 @@ void __outw(u16 val, int port)
BUG(); BUG();
} }
__raw_putw(val, ISAIO_BASE + off); __raw_writew(val, ISAIO_BASE + off);
} }
void __outl(u32 val, int port) void __outl(u32 val, int port)
......
...@@ -230,8 +230,9 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy) ...@@ -230,8 +230,9 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
} }
static struct cpufreq_driver sa1100_driver = { static struct cpufreq_driver sa1100_driver = {
.flags = (CPUFREQ_PANIC_OUTOFSYNC | .flags = CPUFREQ_STICKY |
CPUFREQ_PANIC_RESUME_OUTOFSYNC), CPUFREQ_PANIC_OUTOFSYNC |
CPUFREQ_PANIC_RESUME_OUTOFSYNC,
.verify = sa11x0_verify_speed, .verify = sa11x0_verify_speed,
.target = sa1100_target, .target = sa1100_target,
.get = sa11x0_getspeed, .get = sa11x0_getspeed,
......
...@@ -329,8 +329,9 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy) ...@@ -329,8 +329,9 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
} }
static struct cpufreq_driver sa1110_driver = { static struct cpufreq_driver sa1110_driver = {
.flags = (CPUFREQ_PANIC_OUTOFSYNC | .flags = CPUFREQ_STICKY |
CPUFREQ_PANIC_RESUME_OUTOFSYNC), CPUFREQ_PANIC_OUTOFSYNC |
CPUFREQ_PANIC_RESUME_OUTOFSYNC,
.verify = sa11x0_verify_speed, .verify = sa11x0_verify_speed,
.target = sa1110_target, .target = sa1110_target,
.get = sa11x0_getspeed, .get = sa11x0_getspeed,
......
...@@ -254,6 +254,7 @@ static int elanfreq_cpu_exit(struct cpufreq_policy *policy) ...@@ -254,6 +254,7 @@ static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
static int __init elanfreq_setup(char *str) static int __init elanfreq_setup(char *str)
{ {
max_freq = simple_strtoul(str, &str, 0); max_freq = simple_strtoul(str, &str, 0);
printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
return 1; return 1;
} }
__setup("elanfreq=", elanfreq_setup); __setup("elanfreq=", elanfreq_setup);
...@@ -300,7 +301,7 @@ static void __exit elanfreq_exit(void) ...@@ -300,7 +301,7 @@ static void __exit elanfreq_exit(void)
} }
MODULE_PARM (max_freq, "i"); module_param (max_freq, int, 0444);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>"); MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>");
......
...@@ -124,7 +124,7 @@ static int stock_freq; ...@@ -124,7 +124,7 @@ static int stock_freq;
/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ /* PCI bus clock - defaults to 30.000 if cpu_khz is not available */
static int pci_busclk = 0; static int pci_busclk = 0;
MODULE_PARM(pci_busclk, "i"); module_param (pci_busclk, int, 0444);
/* maximum duration for which the cpu may be suspended /* maximum duration for which the cpu may be suspended
* (32us * MAX_DURATION). If no parameter is given, this defaults * (32us * MAX_DURATION). If no parameter is given, this defaults
...@@ -133,7 +133,7 @@ MODULE_PARM(pci_busclk, "i"); ...@@ -133,7 +133,7 @@ MODULE_PARM(pci_busclk, "i");
* is suspended -- processing power is just 0.39% of what it used to be, * is suspended -- processing power is just 0.39% of what it used to be,
* though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
static int max_duration = 255; static int max_duration = 255;
MODULE_PARM(max_duration, "i"); module_param (max_duration, int, 0444);
/* For the default policy, we want at least some processing power /* For the default policy, we want at least some processing power
* - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
static unsigned int numscales=16, numvscales; static unsigned int numscales=16, numvscales;
static unsigned int fsb; static unsigned int fsb;
static int minvid, maxvid; static int minvid, maxvid;
static unsigned int minmult, maxmult;
static int can_scale_voltage; static int can_scale_voltage;
static int vrmrev; static int vrmrev;
...@@ -45,11 +46,15 @@ static int debug; ...@@ -45,11 +46,15 @@ static int debug;
static void dprintk(const char *fmt, ...) static void dprintk(const char *fmt, ...)
{ {
char s[256];
va_list args; va_list args;
if (debug == 0) if (debug == 0)
return; return;
va_start(args, fmt); va_start(args, fmt);
printk(fmt, args); vsprintf(s, fmt, args);
printk(s);
va_end(args); va_end(args);
} }
...@@ -65,7 +70,7 @@ static int longhaul_version; ...@@ -65,7 +70,7 @@ static int longhaul_version;
static struct cpufreq_frequency_table *longhaul_table; static struct cpufreq_frequency_table *longhaul_table;
static unsigned int calc_speed (int mult, int fsb) static unsigned int calc_speed(int mult, int fsb)
{ {
int khz; int khz;
khz = (mult/10)*fsb; khz = (mult/10)*fsb;
...@@ -76,7 +81,7 @@ static unsigned int calc_speed (int mult, int fsb) ...@@ -76,7 +81,7 @@ static unsigned int calc_speed (int mult, int fsb)
} }
static int longhaul_get_cpu_mult (void) static int longhaul_get_cpu_mult(void)
{ {
unsigned long invalue=0,lo, hi; unsigned long invalue=0,lo, hi;
...@@ -97,7 +102,7 @@ static int longhaul_get_cpu_mult (void) ...@@ -97,7 +102,7 @@ static int longhaul_get_cpu_mult (void)
* Sets a new clock ratio, and -if applicable- a new Front Side Bus * Sets a new clock ratio, and -if applicable- a new Front Side Bus
*/ */
static void longhaul_setstate (unsigned int clock_ratio_index) static void longhaul_setstate(unsigned int clock_ratio_index)
{ {
int speed, mult; int speed, mult;
struct cpufreq_freqs freqs; struct cpufreq_freqs freqs;
...@@ -193,7 +198,7 @@ static void longhaul_setstate (unsigned int clock_ratio_index) ...@@ -193,7 +198,7 @@ static void longhaul_setstate (unsigned int clock_ratio_index)
#define ROUNDING 0xf #define ROUNDING 0xf
static int _guess (int guess, int maxmult) static int _guess(int guess)
{ {
int target; int target;
...@@ -206,7 +211,7 @@ static int _guess (int guess, int maxmult) ...@@ -206,7 +211,7 @@ static int _guess (int guess, int maxmult)
} }
static int guess_fsb(int maxmult) static int guess_fsb(void)
{ {
int speed = (cpu_khz/1000); int speed = (cpu_khz/1000);
int i; int i;
...@@ -216,18 +221,17 @@ static int guess_fsb(int maxmult) ...@@ -216,18 +221,17 @@ static int guess_fsb(int maxmult)
speed &= ~ROUNDING; speed &= ~ROUNDING;
for (i=0; i<3; i++) { for (i=0; i<3; i++) {
if (_guess(speeds[i],maxmult) == speed) if (_guess(speeds[i]) == speed)
return speeds[i]; return speeds[i];
} }
return 0; return 0;
} }
static int __init longhaul_get_ranges (void) static int __init longhaul_get_ranges(void)
{ {
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = cpu_data;
unsigned long invalue; unsigned long invalue;
unsigned int minmult=0, maxmult=0;
unsigned int multipliers[32]= { unsigned int multipliers[32]= {
50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65, 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
-1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 }; -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
...@@ -248,7 +252,7 @@ static int __init longhaul_get_ranges (void) ...@@ -248,7 +252,7 @@ static int __init longhaul_get_ranges (void)
if (c->x86_model==6) if (c->x86_model==6)
fsb = eblcr_fsb_table_v1[invalue]; fsb = eblcr_fsb_table_v1[invalue];
else else
fsb = guess_fsb(maxmult); fsb = guess_fsb();
break; break;
case 2: case 2:
...@@ -398,7 +402,7 @@ static int longhaul_verify(struct cpufreq_policy *policy) ...@@ -398,7 +402,7 @@ static int longhaul_verify(struct cpufreq_policy *policy)
} }
static int longhaul_target (struct cpufreq_policy *policy, static int longhaul_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int target_freq,
unsigned int relation) unsigned int relation)
{ {
...@@ -422,7 +426,7 @@ static unsigned int longhaul_get(unsigned int cpu) ...@@ -422,7 +426,7 @@ static unsigned int longhaul_get(unsigned int cpu)
return (calc_speed (longhaul_get_cpu_mult(), fsb)); return (calc_speed (longhaul_get_cpu_mult(), fsb));
} }
static int __init longhaul_cpu_init (struct cpufreq_policy *policy) static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{ {
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = cpu_data;
char *cpuname=NULL; char *cpuname=NULL;
...@@ -536,7 +540,7 @@ static struct cpufreq_driver longhaul_driver = { ...@@ -536,7 +540,7 @@ static struct cpufreq_driver longhaul_driver = {
.attr = longhaul_attr, .attr = longhaul_attr,
}; };
static int __init longhaul_init (void) static int __init longhaul_init(void)
{ {
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = cpu_data;
...@@ -553,8 +557,17 @@ static int __init longhaul_init (void) ...@@ -553,8 +557,17 @@ static int __init longhaul_init (void)
return -ENODEV; return -ENODEV;
} }
static void __exit longhaul_exit (void) static void __exit longhaul_exit(void)
{ {
int i=0;
unsigned int new_clock_ratio;
while (clock_ratio[i] != maxmult)
i++;
new_clock_ratio = longhaul_table[i].index & 0xFF;
longhaul_setstate(new_clock_ratio);
cpufreq_unregister_driver(&longhaul_driver); cpufreq_unregister_driver(&longhaul_driver);
kfree(longhaul_table); kfree(longhaul_table);
} }
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/cpumask.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/msr.h> #include <asm/msr.h>
...@@ -132,18 +133,15 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, ...@@ -132,18 +133,15 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
#endif #endif
/* notifiers */ /* notifiers */
for_each_cpu(i) { for_each_cpu_mask(i, affected_cpu_map) {
if (cpu_isset(i, affected_cpu_map)) {
freqs.cpu = i; freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
} }
}
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3 * Developer's Manual, Volume 3
*/ */
for_each_cpu(i) { for_each_cpu_mask(i, affected_cpu_map) {
if (cpu_isset(i, affected_cpu_map)) {
cpumask_t this_cpu = cpumask_of_cpu(i); cpumask_t this_cpu = cpumask_of_cpu(i);
set_cpus_allowed(current, this_cpu); set_cpus_allowed(current, this_cpu);
...@@ -151,16 +149,13 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, ...@@ -151,16 +149,13 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
} }
}
set_cpus_allowed(current, cpus_allowed); set_cpus_allowed(current, cpus_allowed);
/* notifiers */ /* notifiers */
for_each_cpu(i) { for_each_cpu_mask(i, affected_cpu_map) {
if (cpu_isset(i, affected_cpu_map)) {
freqs.cpu = i; freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
} }
}
return 0; return 0;
} }
......
...@@ -733,12 +733,24 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) ...@@ -733,12 +733,24 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
continue; continue;
} }
/* verify only 1 entry from the lo frequency table */ if (fid < HI_FID_TABLE_BOTTOM) {
if ((fid < HI_FID_TABLE_BOTTOM) && (cntlofreq++)) { if (cntlofreq) {
/* if both entries are the same, ignore this
* one...
*/
if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
(powernow_table[i].index != powernow_table[cntlofreq].index)) {
printk(KERN_ERR PFX "Too many lo freq table entries\n"); printk(KERN_ERR PFX "Too many lo freq table entries\n");
goto err_out_mem; goto err_out_mem;
} }
dprintk(KERN_INFO PFX "double low frequency table entry, ignoring it.\n");
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
} else
cntlofreq = i;
}
if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
powernow_table[i].frequency, powernow_table[i].frequency,
...@@ -857,12 +869,9 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi ...@@ -857,12 +869,9 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
if (smp_processor_id() != pol->cpu) { if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu); printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
goto sched_out; goto err_out;
} }
/* from this point, do not exit without restoring preempt and cpu */
preempt_disable();
if (pending_bit_stuck()) { if (pending_bit_stuck()) {
printk(KERN_ERR PFX "failing targ, change pending bit set\n"); printk(KERN_ERR PFX "failing targ, change pending bit set\n");
goto err_out; goto err_out;
...@@ -900,8 +909,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi ...@@ -900,8 +909,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
ret = 0; ret = 0;
err_out: err_out:
preempt_enable_no_resched();
sched_out:
set_cpus_allowed(current, oldmask); set_cpus_allowed(current, oldmask);
schedule(); schedule();
......
...@@ -67,28 +67,19 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { ...@@ -67,28 +67,19 @@ static struct cpufreq_frequency_table speedstep_freqs[] = {
/** /**
* speedstep_set_state - set the SpeedStep state * speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
* @notify: whether to call cpufreq_notify_transition for CPU speed changes
* *
* Tries to change the SpeedStep state. * Tries to change the SpeedStep state.
*/ */
static void speedstep_set_state (unsigned int state, unsigned int notify) static void speedstep_set_state (unsigned int state)
{ {
u32 pmbase; u32 pmbase;
u8 pm2_blk; u8 pm2_blk;
u8 value; u8 value;
unsigned long flags; unsigned long flags;
struct cpufreq_freqs freqs;
if (!speedstep_chipset_dev || (state > 0x1)) if (!speedstep_chipset_dev || (state > 0x1))
return; return;
freqs.old = speedstep_get_processor_frequency(speedstep_processor);
freqs.new = speedstep_freqs[state].frequency;
freqs.cpu = 0; /* speedstep.c is UP only driver */
if (notify)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* get PMBASE */ /* get PMBASE */
pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
if (!(pmbase & 0x01)) if (!(pmbase & 0x01))
...@@ -143,9 +134,6 @@ static void speedstep_set_state (unsigned int state, unsigned int notify) ...@@ -143,9 +134,6 @@ static void speedstep_set_state (unsigned int state, unsigned int notify)
printk (KERN_ERR "cpufreq: change failed - I/O error\n"); printk (KERN_ERR "cpufreq: change failed - I/O error\n");
} }
if (notify)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return; return;
} }
...@@ -252,11 +240,47 @@ static int speedstep_target (struct cpufreq_policy *policy, ...@@ -252,11 +240,47 @@ static int speedstep_target (struct cpufreq_policy *policy,
unsigned int relation) unsigned int relation)
{ {
unsigned int newstate = 0; unsigned int newstate = 0;
struct cpufreq_freqs freqs;
cpumask_t cpus_allowed, affected_cpu_map;
int i;
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
return -EINVAL; return -EINVAL;
speedstep_set_state(newstate, 1); /* no transition necessary */
if (freqs.old == freqs.new)
return 0;
freqs.old = speedstep_get_processor_frequency(speedstep_processor);
freqs.new = speedstep_freqs[newstate].frequency;
freqs.cpu = policy->cpu;
cpus_allowed = current->cpus_allowed;
/* only run on CPU to be set, or on its sibling */
#ifdef CONFIG_SMP
affected_cpu_map = cpu_sibling_map[policy->cpu];
#else
affected_cpu_map = cpumask_of_cpu(policy->cpu);
#endif
for_each_cpu_mask(i, affected_cpu_map) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
/* switch to physical CPU where state is to be changed */
set_cpus_allowed(current, affected_cpu_map);
speedstep_set_state(newstate);
/* allow to be run on all CPUs */
set_cpus_allowed(current, cpus_allowed);
for_each_cpu_mask(i, affected_cpu_map) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
return 0; return 0;
} }
...@@ -279,21 +303,35 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) ...@@ -279,21 +303,35 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
{ {
int result = 0; int result = 0;
unsigned int speed; unsigned int speed;
cpumask_t cpus_allowed,affected_cpu_map;
/* capability check */ /* capability check */
if (policy->cpu != 0) if (policy->cpu != 0) /* FIXME: better support for SMT in cpufreq core. Up until then, it's better to register only one CPU */
return -ENODEV; return -ENODEV;
/* only run on CPU to be set, or on its sibling */
cpus_allowed = current->cpus_allowed;
#ifdef CONFIG_SMP
affected_cpu_map = cpu_sibling_map[policy->cpu];
#else
affected_cpu_map = cpumask_of_cpu(policy->cpu);
#endif
set_cpus_allowed(current, affected_cpu_map);
/* detect low and high frequency */ /* detect low and high frequency */
result = speedstep_get_freqs(speedstep_processor, result = speedstep_get_freqs(speedstep_processor,
&speedstep_freqs[SPEEDSTEP_LOW].frequency, &speedstep_freqs[SPEEDSTEP_LOW].frequency,
&speedstep_freqs[SPEEDSTEP_HIGH].frequency, &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
&speedstep_set_state); &speedstep_set_state);
if (result) if (result) {
set_cpus_allowed(current, cpus_allowed);
return result; return result;
}
/* get current speed setting */ /* get current speed setting */
speed = speedstep_get_processor_frequency(speedstep_processor); speed = speedstep_get_processor_frequency(speedstep_processor);
set_cpus_allowed(current, cpus_allowed);
if (!speed) if (!speed)
return -EIO; return -EIO;
......
...@@ -252,11 +252,10 @@ unsigned int speedstep_detect_processor (void) ...@@ -252,11 +252,10 @@ unsigned int speedstep_detect_processor (void)
* specific. * specific.
* M-P4-Ms may have either ebx=0xe or 0xf [see above] * M-P4-Ms may have either ebx=0xe or 0xf [see above]
* M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf] * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
* So, how to distinguish all those processors with * also, M-P4M HTs have ebx=0x8, too
* ebx=0xf? I don't know. Sort them out, and wait * For now, they are distinguished by the model_id string
* for someone to complain.
*/ */
if (ebx == 0x0e) if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
return SPEEDSTEP_PROCESSOR_P4M; return SPEEDSTEP_PROCESSOR_P4M;
break; break;
default: default:
...@@ -321,9 +320,7 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor); ...@@ -321,9 +320,7 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
unsigned int speedstep_get_freqs(unsigned int processor, unsigned int speedstep_get_freqs(unsigned int processor,
unsigned int *low_speed, unsigned int *low_speed,
unsigned int *high_speed, unsigned int *high_speed,
void (*set_state) (unsigned int state, void (*set_state) (unsigned int state))
unsigned int notify)
)
{ {
unsigned int prev_speed; unsigned int prev_speed;
unsigned int ret = 0; unsigned int ret = 0;
...@@ -340,7 +337,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, ...@@ -340,7 +337,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
local_irq_save(flags); local_irq_save(flags);
/* switch to low state */ /* switch to low state */
set_state(SPEEDSTEP_LOW, 0); set_state(SPEEDSTEP_LOW);
*low_speed = speedstep_get_processor_frequency(processor); *low_speed = speedstep_get_processor_frequency(processor);
if (!*low_speed) { if (!*low_speed) {
ret = -EIO; ret = -EIO;
...@@ -348,7 +345,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, ...@@ -348,7 +345,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
} }
/* switch to high state */ /* switch to high state */
set_state(SPEEDSTEP_HIGH, 0); set_state(SPEEDSTEP_HIGH);
*high_speed = speedstep_get_processor_frequency(processor); *high_speed = speedstep_get_processor_frequency(processor);
if (!*high_speed) { if (!*high_speed) {
ret = -EIO; ret = -EIO;
...@@ -362,7 +359,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, ...@@ -362,7 +359,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
/* switch to previous state, if necessary */ /* switch to previous state, if necessary */
if (*high_speed != prev_speed) if (*high_speed != prev_speed)
set_state(SPEEDSTEP_LOW, 0); set_state(SPEEDSTEP_LOW);
out: out:
local_irq_restore(flags); local_irq_restore(flags);
......
...@@ -44,4 +44,4 @@ extern unsigned int speedstep_get_processor_frequency(unsigned int processor); ...@@ -44,4 +44,4 @@ extern unsigned int speedstep_get_processor_frequency(unsigned int processor);
extern unsigned int speedstep_get_freqs(unsigned int processor, extern unsigned int speedstep_get_freqs(unsigned int processor,
unsigned int *low_speed, unsigned int *low_speed,
unsigned int *high_speed, unsigned int *high_speed,
void (*set_state) (unsigned int state, unsigned int notify)); void (*set_state) (unsigned int state));
...@@ -139,37 +139,24 @@ static int speedstep_get_state (void) ...@@ -139,37 +139,24 @@ static int speedstep_get_state (void)
: "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0) : "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0)
); );
return state; return (state & 1);
} }
/** /**
* speedstep_set_state - set the SpeedStep state * speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
* @notify: whether to call cpufreq_notify_transition
* *
*/ */
static void speedstep_set_state (unsigned int state, unsigned int notify) static void speedstep_set_state (unsigned int state)
{ {
unsigned int old_state, result = 0, command, new_state; unsigned int result = 0, command, new_state;
unsigned long flags; unsigned long flags;
struct cpufreq_freqs freqs;
unsigned int function=SET_SPEEDSTEP_STATE; unsigned int function=SET_SPEEDSTEP_STATE;
unsigned int retry = 0; unsigned int retry = 0;
if (state > 0x1) if (state > 0x1)
return; return;
old_state = speedstep_get_state();
freqs.old = speedstep_freqs[old_state].frequency;
freqs.new = speedstep_freqs[state].frequency;
freqs.cpu = 0; /* speedstep.c is UP only driver */
if (old_state == state)
return;
if (notify)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* Disable IRQs */ /* Disable IRQs */
local_irq_save(flags); local_irq_save(flags);
...@@ -198,9 +185,6 @@ static void speedstep_set_state (unsigned int state, unsigned int notify) ...@@ -198,9 +185,6 @@ static void speedstep_set_state (unsigned int state, unsigned int notify)
printk(KERN_ERR "cpufreq: change failed with new_state %u and result %u\n", new_state, result); printk(KERN_ERR "cpufreq: change failed with new_state %u and result %u\n", new_state, result);
} }
if (notify)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return; return;
} }
...@@ -217,11 +201,21 @@ static int speedstep_target (struct cpufreq_policy *policy, ...@@ -217,11 +201,21 @@ static int speedstep_target (struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation) unsigned int target_freq, unsigned int relation)
{ {
unsigned int newstate = 0; unsigned int newstate = 0;
struct cpufreq_freqs freqs;
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
return -EINVAL; return -EINVAL;
speedstep_set_state(newstate, 1); freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
freqs.new = speedstep_freqs[newstate].frequency;
freqs.cpu = 0; /* speedstep.c is UP only driver */
if (freqs.old == freqs.new)
return 0;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
speedstep_set_state(newstate);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return 0; return 0;
} }
......
...@@ -722,7 +722,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, ...@@ -722,7 +722,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int target_freq,
unsigned int relation) unsigned int relation)
{ {
return cpufreq_driver->target(policy, target_freq, relation); int retval = -EINVAL;
lock_cpu_hotplug();
if (cpu_online(policy->cpu))
retval = cpufreq_driver->target(policy, target_freq, relation);
unlock_cpu_hotplug();
return retval;
} }
EXPORT_SYMBOL_GPL(__cpufreq_driver_target); EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
......
...@@ -53,25 +53,31 @@ static const char version[] = ...@@ -53,25 +53,31 @@ static const char version[] =
#ifdef __arm__ #ifdef __arm__
static void write_rreg(u_long base, u_int reg, u_int val) static void write_rreg(u_long base, u_int reg, u_int val)
{ {
__asm__("str%?h %1, [%2] @ NET_RAP __asm__(
str%?h %0, [%2, #-4] @ NET_RDP "str%?h %1, [%2] @ NET_RAP\n\t"
" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); "str%?h %0, [%2, #-4] @ NET_RDP"
:
: "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
} }
static inline unsigned short read_rreg(u_long base_addr, u_int reg) static inline unsigned short read_rreg(u_long base_addr, u_int reg)
{ {
unsigned short v; unsigned short v;
__asm__("str%?h %1, [%2] @ NET_RAP __asm__(
ldr%?h %0, [%2, #-4] @ NET_RDP "str%?h %1, [%2] @ NET_RAP\n\t"
" : "=r" (v): "r" (reg), "r" (ISAIO_BASE + 0x0464)); "ldr%?h %0, [%2, #-4] @ NET_RDP"
: "=r" (v)
: "r" (reg), "r" (ISAIO_BASE + 0x0464));
return v; return v;
} }
static inline void write_ireg(u_long base, u_int reg, u_int val) static inline void write_ireg(u_long base, u_int reg, u_int val)
{ {
__asm__("str%?h %1, [%2] @ NET_RAP __asm__(
str%?h %0, [%2, #8] @ NET_IDP "str%?h %1, [%2] @ NET_RAP\n\t"
" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); "str%?h %0, [%2, #8] @ NET_IDP"
:
: "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
} }
static inline unsigned short read_ireg(u_long base_addr, u_int reg) static inline unsigned short read_ireg(u_long base_addr, u_int reg)
...@@ -101,15 +107,15 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne ...@@ -101,15 +107,15 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
} }
while (length > 8) { while (length > 8) {
unsigned int tmp, tmp2; unsigned int tmp, tmp2;
__asm__ __volatile__(" __asm__ __volatile__(
ldm%?ia %1!, {%2, %3} "ldm%?ia %1!, {%2, %3}\n\t"
str%?h %2, [%0], #4 "str%?h %2, [%0], #4\n\t"
mov%? %2, %2, lsr #16 "mov%? %2, %2, lsr #16\n\t"
str%?h %2, [%0], #4 "str%?h %2, [%0], #4\n\t"
str%?h %3, [%0], #4 "str%?h %3, [%0], #4\n\t"
mov%? %3, %3, lsr #16 "mov%? %3, %3, lsr #16\n\t"
str%?h %3, [%0], #4 "str%?h %3, [%0], #4"
" : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2) : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
: "0" (offset), "1" (buf)); : "0" (offset), "1" (buf));
length -= 8; length -= 8;
} }
...@@ -128,36 +134,36 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned ...@@ -128,36 +134,36 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
length = (length + 1) & ~1; length = (length + 1) & ~1;
if ((int)buf & 2) { if ((int)buf & 2) {
unsigned int tmp; unsigned int tmp;
__asm__ __volatile__(" __asm__ __volatile__(
ldr%?h %2, [%0], #4 "ldr%?h %2, [%0], #4\n\t"
str%?b %2, [%1], #1 "str%?b %2, [%1], #1\n\t"
mov%? %2, %2, lsr #8 "mov%? %2, %2, lsr #8\n\t"
str%?b %2, [%1], #1 "str%?b %2, [%1], #1"
" : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
length -= 2; length -= 2;
} }
while (length > 8) { while (length > 8) {
unsigned int tmp, tmp2, tmp3; unsigned int tmp, tmp2, tmp3;
__asm__ __volatile__(" __asm__ __volatile__(
ldr%?h %2, [%0], #4 "ldr%?h %2, [%0], #4\n\t"
ldr%?h %3, [%0], #4 "ldr%?h %3, [%0], #4\n\t"
orr%? %2, %2, %3, lsl #16 "orr%? %2, %2, %3, lsl #16\n\t"
ldr%?h %3, [%0], #4 "ldr%?h %3, [%0], #4\n\t"
ldr%?h %4, [%0], #4 "ldr%?h %4, [%0], #4\n\t"
orr%? %3, %3, %4, lsl #16 "orr%? %3, %3, %4, lsl #16\n\t"
stm%?ia %1!, {%2, %3} "stm%?ia %1!, {%2, %3}"
" : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
: "0" (offset), "1" (buf)); : "0" (offset), "1" (buf));
length -= 8; length -= 8;
} }
while (length > 0) { while (length > 0) {
unsigned int tmp; unsigned int tmp;
__asm__ __volatile__(" __asm__ __volatile__(
ldr%?h %2, [%0], #4 "ldr%?h %2, [%0], #4\n\t"
str%?b %2, [%1], #1 "str%?b %2, [%1], #1\n\t"
mov%? %2, %2, lsr #8 "mov%? %2, %2, lsr #8\n\t"
str%?b %2, [%1], #1 "str%?b %2, [%1], #1"
" : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
length -= 2; length -= 2;
} }
} }
...@@ -618,6 +624,7 @@ am79c961_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -618,6 +624,7 @@ am79c961_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (status & CSR0_CERR) { if (status & CSR0_CERR) {
handled = 1; handled = 1;
mod_timer(&priv->timer, jiffies); mod_timer(&priv->timer, jiffies);
}
} while (--n && status & (CSR0_RINT | CSR0_TINT)); } while (--n && status & (CSR0_RINT | CSR0_TINT));
return IRQ_RETVAL(handled); return IRQ_RETVAL(handled);
......
/* /*
* Copyright (c) International Business Machines Corp., 2000-2001 * Copyright (C) International Business Machines Corp., 2000-2004
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
...@@ -108,13 +108,12 @@ struct btpage { ...@@ -108,13 +108,12 @@ struct btpage {
* record the path traversed during the search; * record the path traversed during the search;
* top frame record the leaf page/entry selected. * top frame record the leaf page/entry selected.
*/ */
#define MAXTREEHEIGHT 8
struct btframe { /* stack frame */ struct btframe { /* stack frame */
s64 bn; /* 8: */ s64 bn; /* 8: */
s16 index; /* 2: */ s16 index; /* 2: */
s16 lastindex; /* 2: */ s16 lastindex; /* 2: unused */
struct metapage *mp; /* 4: */ struct metapage *mp; /* 4/8: */
}; /* (16) */ }; /* (16/24) */
struct btstack { struct btstack {
struct btframe *top; struct btframe *top;
...@@ -125,12 +124,15 @@ struct btstack { ...@@ -125,12 +124,15 @@ struct btstack {
#define BT_CLR(btstack)\ #define BT_CLR(btstack)\
(btstack)->top = (btstack)->stack (btstack)->top = (btstack)->stack
#define BT_STACK_FULL(btstack)\
( (btstack)->top == &((btstack)->stack[MAXTREEHEIGHT-1]))
#define BT_PUSH(BTSTACK, BN, INDEX)\ #define BT_PUSH(BTSTACK, BN, INDEX)\
{\ {\
assert(!BT_STACK_FULL(BTSTACK));\
(BTSTACK)->top->bn = BN;\ (BTSTACK)->top->bn = BN;\
(BTSTACK)->top->index = INDEX;\ (BTSTACK)->top->index = INDEX;\
++(BTSTACK)->top;\ ++(BTSTACK)->top;\
assert((BTSTACK)->top != &((BTSTACK)->stack[MAXTREEHEIGHT]));\
} }
#define BT_POP(btstack)\ #define BT_POP(btstack)\
...@@ -139,6 +141,16 @@ struct btstack { ...@@ -139,6 +141,16 @@ struct btstack {
#define BT_STACK(btstack)\ #define BT_STACK(btstack)\
( (btstack)->top == (btstack)->stack ? NULL : (btstack)->top ) ( (btstack)->top == (btstack)->stack ? NULL : (btstack)->top )
static inline void BT_STACK_DUMP(struct btstack *btstack)
{
int i;
printk("btstack dump:\n");
for (i = 0; i < MAXTREEHEIGHT; i++)
printk(KERN_ERR "bn = %Lx, index = %d\n",
btstack->stack[i].bn,
btstack->stack[i].index);
}
/* retrieve search results */ /* retrieve search results */
#define BT_GETSEARCH(IP, LEAF, BN, MP, TYPE, P, INDEX, ROOT)\ #define BT_GETSEARCH(IP, LEAF, BN, MP, TYPE, P, INDEX, ROOT)\
{\ {\
......
/* /*
* Copyright (C) International Business Machines Corp., 2000-2003 * Copyright (C) International Business Machines Corp., 2000-2004
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
...@@ -382,7 +382,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks) ...@@ -382,7 +382,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
IREAD_LOCK(ipbmap); IREAD_LOCK(ipbmap);
/* block to be freed better be within the mapsize. */ /* block to be freed better be within the mapsize. */
if (blkno + nblocks > bmp->db_mapsize) { if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
IREAD_UNLOCK(ipbmap); IREAD_UNLOCK(ipbmap);
printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n", printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
(unsigned long long) blkno, (unsigned long long) blkno,
......
/* /*
* Copyright (C) International Business Machines Corp., 2000-2003 * Copyright (C) International Business Machines Corp., 2000-2004
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
...@@ -378,6 +378,8 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) ...@@ -378,6 +378,8 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external * It's time to move the inline table to an external
* page and begin to build the xtree * page and begin to build the xtree
*/ */
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr))
goto clean_up; /* No space */
/* /*
* Save the table, we're going to overwrite it with the * Save the table, we're going to overwrite it with the
...@@ -394,8 +396,8 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) ...@@ -394,8 +396,8 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
/* /*
* Allocate the first block & add it to the xtree * Allocate the first block & add it to the xtree
*/ */
xaddr = 0;
if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) { if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
/* This really shouldn't fail */
jfs_warn("add_index: xtInsert failed!"); jfs_warn("add_index: xtInsert failed!");
memcpy(&jfs_ip->i_dirtable, temp_table, memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table)); sizeof (temp_table));
...@@ -764,11 +766,12 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data, ...@@ -764,11 +766,12 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
*/ */
getChild: getChild:
/* update max. number of pages to split */ /* update max. number of pages to split */
if (btstack->nsplit >= 8) { if (BT_STACK_FULL(btstack)) {
/* Something's corrupted, mark filesytem dirty so /* Something's corrupted, mark filesytem dirty so
* chkdsk will fix it. * chkdsk will fix it.
*/ */
jfs_error(sb, "stack overrun in dtSearch!"); jfs_error(sb, "stack overrun in dtSearch!");
BT_STACK_DUMP(btstack);
rc = -EIO; rc = -EIO;
goto out; goto out;
} }
...@@ -975,8 +978,10 @@ static int dtSplitUp(tid_t tid, ...@@ -975,8 +978,10 @@ static int dtSplitUp(tid_t tid,
n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */ n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */
if (n <= split->nslot) if (n <= split->nslot)
xlen++; xlen++;
if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) {
DT_PUTPAGE(smp);
goto freeKeyName; goto freeKeyName;
}
pxdlist.maxnpxd = 1; pxdlist.maxnpxd = 1;
pxdlist.npxd = 0; pxdlist.npxd = 0;
...@@ -3342,6 +3347,12 @@ static int dtReadFirst(struct inode *ip, struct btstack * btstack) ...@@ -3342,6 +3347,12 @@ static int dtReadFirst(struct inode *ip, struct btstack * btstack)
/* /*
* descend down to leftmost child page * descend down to leftmost child page
*/ */
if (BT_STACK_FULL(btstack)) {
DT_PUTPAGE(mp);
jfs_error(ip->i_sb, "dtReadFirst: btstack overrun");
BT_STACK_DUMP(btstack);
return -EIO;
}
/* push (bn, index) of the parent page/entry */ /* push (bn, index) of the parent page/entry */
BT_PUSH(btstack, bn, 0); BT_PUSH(btstack, bn, 0);
......
...@@ -239,6 +239,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock, ...@@ -239,6 +239,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
spin_unlock(&meta_lock); spin_unlock(&meta_lock);
if (test_bit(META_stale, &mp->flag)) { if (test_bit(META_stale, &mp->flag)) {
release_metapage(mp); release_metapage(mp);
yield(); /* Let other waiters release it, too */
goto again; goto again;
} }
if (test_bit(META_discard, &mp->flag)) { if (test_bit(META_discard, &mp->flag)) {
......
...@@ -1747,7 +1747,10 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, ...@@ -1747,7 +1747,10 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
if (lwm == next) if (lwm == next)
goto out; goto out;
assert(lwm < next); if (lwm > next) {
jfs_err("xtLog: lwm > next\n");
goto out;
}
tlck->flag |= tlckUPDATEMAP; tlck->flag |= tlckUPDATEMAP;
xadlock->flag = mlckALLOCXADLIST; xadlock->flag = mlckALLOCXADLIST;
xadlock->count = next - lwm; xadlock->count = next - lwm;
...@@ -1913,11 +1916,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, ...@@ -1913,11 +1916,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
/* /*
* write log records * write log records
*/ */
/*
* allocate entries XAD[lwm:next]:
*/
if (lwm < next) {
/* log after-image for logredo(): /* log after-image for logredo():
*
* logredo() will update bmap for alloc of new/extended * logredo() will update bmap for alloc of new/extended
* extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
* after-image of XADlist; * after-image of XADlist;
...@@ -1926,12 +1926,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, ...@@ -1926,12 +1926,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
*/ */
lrd->type = cpu_to_le16(LOG_REDOPAGE); lrd->type = cpu_to_le16(LOG_REDOPAGE);
PXDaddress(pxd, mp->index); PXDaddress(pxd, mp->index);
PXDlength(pxd, PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
mp->logical_size >> tblk->sb-> lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
s_blocksize_bits);
lrd->backchain =
cpu_to_le32(lmLog(log, tblk, lrd, tlck));
}
/* /*
* truncate entry XAD[twm == next - 1]: * truncate entry XAD[twm == next - 1]:
...@@ -2624,6 +2620,7 @@ void txAbort(tid_t tid, int dirty) ...@@ -2624,6 +2620,7 @@ void txAbort(tid_t tid, int dirty)
lid_t lid, next; lid_t lid, next;
struct metapage *mp; struct metapage *mp;
struct tblock *tblk = tid_to_tblock(tid); struct tblock *tblk = tid_to_tblock(tid);
struct tlock *tlck;
jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty); jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty);
...@@ -2631,9 +2628,10 @@ void txAbort(tid_t tid, int dirty) ...@@ -2631,9 +2628,10 @@ void txAbort(tid_t tid, int dirty)
* free tlocks of the transaction * free tlocks of the transaction
*/ */
for (lid = tblk->next; lid; lid = next) { for (lid = tblk->next; lid; lid = next) {
next = lid_to_tlock(lid)->next; tlck = lid_to_tlock(lid);
next = tlck->next;
mp = lid_to_tlock(lid)->mp; mp = tlck->mp;
JFS_IP(tlck->ip)->xtlid = 0;
if (mp) { if (mp) {
mp->lid = 0; mp->lid = 0;
......
...@@ -113,11 +113,12 @@ typedef struct { ...@@ -113,11 +113,12 @@ typedef struct {
#define addressPXD(pxd)\ #define addressPXD(pxd)\
( ((s64)((pxd)->addr1)) << 32 | __le32_to_cpu((pxd)->addr2)) ( ((s64)((pxd)->addr1)) << 32 | __le32_to_cpu((pxd)->addr2))
#define MAXTREEHEIGHT 8
/* pxd list */ /* pxd list */
struct pxdlist { struct pxdlist {
s16 maxnpxd; s16 maxnpxd;
s16 npxd; s16 npxd;
pxd_t pxd[8]; pxd_t pxd[MAXTREEHEIGHT];
}; };
......
...@@ -858,7 +858,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, ...@@ -858,7 +858,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
unchar *i_fastsymlink; unchar *i_fastsymlink;
s64 xlen = 0; s64 xlen = 0;
int bmask = 0, xsize; int bmask = 0, xsize;
s64 xaddr; s64 extent = 0, xaddr;
struct metapage *mp; struct metapage *mp;
struct super_block *sb; struct super_block *sb;
struct tblock *tblk; struct tblock *tblk;
...@@ -892,29 +892,11 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, ...@@ -892,29 +892,11 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
down(&JFS_IP(dip)->commit_sem); down(&JFS_IP(dip)->commit_sem);
down(&JFS_IP(ip)->commit_sem); down(&JFS_IP(ip)->commit_sem);
if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE)))
goto out3;
tblk = tid_to_tblock(tid); tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_CREATE; tblk->xflag |= COMMIT_CREATE;
tblk->ino = ip->i_ino; tblk->ino = ip->i_ino;
tblk->u.ixpxd = JFS_IP(ip)->ixpxd; tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
/*
* create entry for symbolic link in parent directory
*/
ino = ip->i_ino;
if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
jfs_err("jfs_symlink: dtInsert returned %d", rc);
/* discard ne inode */
goto out3;
}
/* fix symlink access permission /* fix symlink access permission
* (dir_create() ANDs in the u.u_cmask, * (dir_create() ANDs in the u.u_cmask,
* but symlinks really need to be 777 access) * but symlinks really need to be 777 access)
...@@ -966,37 +948,48 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, ...@@ -966,37 +948,48 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
xsize = (ssize + bmask) & ~bmask; xsize = (ssize + bmask) & ~bmask;
xaddr = 0; xaddr = 0;
xlen = xsize >> JFS_SBI(sb)->l2bsize; xlen = xsize >> JFS_SBI(sb)->l2bsize;
if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0)) == 0) { if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0))) {
txAbort(tid, 0);
rc = -ENOSPC;
goto out3;
}
extent = xaddr;
ip->i_size = ssize - 1; ip->i_size = ssize - 1;
while (ssize) { while (ssize) {
/* This is kind of silly since PATH_MAX == 4K */
int copy_size = min(ssize, PSIZE); int copy_size = min(ssize, PSIZE);
mp = get_metapage(ip, xaddr, PSIZE, 1); mp = get_metapage(ip, xaddr, PSIZE, 1);
if (mp == NULL) { if (mp == NULL) {
dtDelete(tid, dip, &dname, &ino, dbFree(ip, extent, xlen);
JFS_REMOVE);
rc = -EIO; rc = -EIO;
txAbort(tid, 0);
goto out3; goto out3;
} }
memcpy(mp->data, name, copy_size); memcpy(mp->data, name, copy_size);
flush_metapage(mp); flush_metapage(mp);
#if 0
set_buffer_uptodate(bp);
mark_buffer_dirty(bp, 1);
if (IS_SYNC(dip))
sync_dirty_buffer(bp);
brelse(bp);
#endif /* 0 */
ssize -= copy_size; ssize -= copy_size;
name += copy_size;
xaddr += JFS_SBI(sb)->nbperpage; xaddr += JFS_SBI(sb)->nbperpage;
} }
ip->i_blocks = LBLK2PBLK(sb, xlen); ip->i_blocks = LBLK2PBLK(sb, xlen);
} else {
dtDelete(tid, dip, &dname, &ino, JFS_REMOVE);
rc = -ENOSPC;
goto out3;
} }
/*
* create entry for symbolic link in parent directory
*/
rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE);
if (rc == 0) {
ino = ip->i_ino;
rc = dtInsert(tid, dip, &dname, &ino, &btstack);
}
if (rc) {
if (xlen)
dbFree(ip, extent, xlen);
txAbort(tid, 0);
/* discard new inode */
goto out3;
} }
insert_inode_hash(ip); insert_inode_hash(ip);
...@@ -1004,23 +997,11 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, ...@@ -1004,23 +997,11 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
/* /*
* commit update of parent directory and link object * commit update of parent directory and link object
*
* if extent allocation failed (ENOSPC),
* the parent inode is committed regardless to avoid
* backing out parent directory update (by dtInsert())
* and subsequent dtDelete() which is harmless wrt
* integrity concern.
* the symlink inode will be freed by iput() at exit
* as it has a zero link count (by dtDelete()) and
* no permanant resources.
*/ */
iplist[0] = dip; iplist[0] = dip;
if (rc == 0) {
iplist[1] = ip; iplist[1] = ip;
rc = txCommit(tid, 2, &iplist[0], 0); rc = txCommit(tid, 2, &iplist[0], 0);
} else
rc = txCommit(tid, 1, &iplist[0], 0);
out3: out3:
txEnd(tid); txEnd(tid);
...@@ -1223,7 +1204,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, ...@@ -1223,7 +1204,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Linelock header of dtree */ /* Linelock header of dtree */
tlck = txLock(tid, old_ip, tlck = txLock(tid, old_ip,
(struct metapage *) &JFS_IP(old_ip)->bxflag, (struct metapage *) &JFS_IP(old_ip)->bxflag,
tlckDTREE | tlckBTROOT); tlckDTREE | tlckBTROOT | tlckRELINK);
dtlck = (struct dt_lock *) & tlck->lock; dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0); ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0]; lv = & dtlck->lv[0];
......
...@@ -688,17 +688,26 @@ static int can_set_system_xattr(struct inode *inode, const char *name, ...@@ -688,17 +688,26 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
} }
inode->i_mode = mode; inode->i_mode = mode;
mark_inode_dirty(inode); mark_inode_dirty(inode);
if (rc == 0)
value = NULL;
} }
/* /*
* We're changing the ACL. Get rid of the cached one * We're changing the ACL. Get rid of the cached one
*/ */
acl =JFS_IP(inode)->i_acl; acl =JFS_IP(inode)->i_acl;
if (acl && (acl != JFS_ACL_NOT_CACHED)) if (acl != JFS_ACL_NOT_CACHED)
posix_acl_release(acl); posix_acl_release(acl);
JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED; JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;
return 0;
} else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) { } else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) {
acl = posix_acl_from_xattr(value, value_len);
if (IS_ERR(acl)) {
rc = PTR_ERR(acl);
printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
rc);
return rc;
}
posix_acl_release(acl);
/* /*
* We're changing the default ACL. Get rid of the cached one * We're changing the default ACL. Get rid of the cached one
*/ */
...@@ -706,13 +715,11 @@ static int can_set_system_xattr(struct inode *inode, const char *name, ...@@ -706,13 +715,11 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
if (acl && (acl != JFS_ACL_NOT_CACHED)) if (acl && (acl != JFS_ACL_NOT_CACHED))
posix_acl_release(acl); posix_acl_release(acl);
JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED; JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED;
} else
/* Invalid xattr name */
return -EINVAL;
return 0; return 0;
#else /* CONFIG_JFS_POSIX_ACL */ }
return -EOPNOTSUPP;
#endif /* CONFIG_JFS_POSIX_ACL */ #endif /* CONFIG_JFS_POSIX_ACL */
return -EOPNOTSUPP;
} }
static int can_set_xattr(struct inode *inode, const char *name, static int can_set_xattr(struct inode *inode, const char *name,
......
...@@ -314,7 +314,7 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) ...@@ -314,7 +314,7 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
if (err >= 0) { if (err >= 0) {
err = 0; err = 0;
if (wbc->for_reclaim) if (wbc->for_reclaim)
err = WRITEPAGE_ACTIVATE; nfs_flush_inode(inode, 0, 0, FLUSH_STABLE);
} }
} else { } else {
err = nfs_writepage_sync(NULL, inode, page, 0, err = nfs_writepage_sync(NULL, inode, page, 0,
...@@ -327,7 +327,6 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) ...@@ -327,7 +327,6 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
} }
unlock_kernel(); unlock_kernel();
out: out:
if (err != WRITEPAGE_ACTIVATE)
unlock_page(page); unlock_page(page);
if (inode_referenced) if (inode_referenced)
iput(inode); iput(inode);
......
...@@ -50,7 +50,7 @@ void __writel(u32 val, void *addr); ...@@ -50,7 +50,7 @@ void __writel(u32 val, void *addr);
#define writew(v,b) __writew(v,b) #define writew(v,b) __writew(v,b)
#define writel(v,b) __writel(v,b) #define writel(v,b) __writel(v,b)
#define __arch_ioremap(cookie,sz,c) ((void *)(cookie)) #define __arch_ioremap(cookie,sz,c,a) ((void *)(cookie))
#define __arch_iounmap(cookie) do { } while (0) #define __arch_iounmap(cookie) do { } while (0)
#endif #endif
...@@ -25,13 +25,13 @@ static inline void arch_idle(void) ...@@ -25,13 +25,13 @@ static inline void arch_idle(void)
const char *irq_stat = (char *)0xff000000; const char *irq_stat = (char *)0xff000000;
/* disable clock switching */ /* disable clock switching */
asm volatile ("mcr%? p15, 0, ip, c15, c2, 2"); asm volatile ("mcr p15, 0, ip, c15, c2, 2" : : : "cc");
/* wait for an interrupt to occur */ /* wait for an interrupt to occur */
while (!*irq_stat); while (!*irq_stat);
/* enable clock switching */ /* enable clock switching */
asm volatile ("mcr%? p15, 0, ip, c15, c1, 2"); asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc");
} }
#define arch_reset(mode) cpu_reset(0x80000000) #define arch_reset(mode) cpu_reset(0x80000000)
......
...@@ -15,5 +15,5 @@ ...@@ -15,5 +15,5 @@
* This is therefore not used to calculate the * This is therefore not used to calculate the
* divisor. * divisor.
*/ */
//#define CLOCK_TICK_RATE 2000000 #define CLOCK_TICK_RATE 47894000
...@@ -13,26 +13,26 @@ ...@@ -13,26 +13,26 @@
*/ */
static void puts(const char *s) static void puts(const char *s)
{ {
__asm__ __volatile__(" __asm__ __volatile__(
ldrb %0, [%2], #1 "ldrb %0, [%2], #1\n"
teq %0, #0 " teq %0, #0\n"
beq 3f " beq 3f\n"
1: strb %0, [%3] "1: strb %0, [%3]\n"
2: ldrb %1, [%3, #0x14] "2: ldrb %1, [%3, #0x14]\n"
and %1, %1, #0x60 " and %1, %1, #0x60\n"
teq %1, #0x60 " teq %1, #0x60\n"
bne 2b " bne 2b\n"
teq %0, #'\n' " teq %0, #'\n'\n"
moveq %0, #'\r' " moveq %0, #'\r'\n"
beq 1b " beq 1b\n"
ldrb %0, [%2], #1 " ldrb %0, [%2], #1\n"
teq %0, #0 " teq %0, #0\n"
bne 1b " bne 1b\n"
3: ldrb %1, [%3, #0x14] "3: ldrb %1, [%3, #0x14]\n"
and %1, %1, #0x60 " and %1, %1, #0x60\n"
teq %1, #0x60 " teq %1, #0x60\n"
bne 3b " bne 3b"
" : : "r" (0), "r" (0), "r" (s), "r" (0xf0000be0) : "cc"); : : "r" (0), "r" (0), "r" (s), "r" (0xf0000be0) : "cc");
} }
/* /*
......
...@@ -307,7 +307,7 @@ static inline void out_le32(volatile unsigned *addr, int val) ...@@ -307,7 +307,7 @@ static inline void out_le32(volatile unsigned *addr, int val)
static inline void out_be32(volatile unsigned *addr, int val) static inline void out_be32(volatile unsigned *addr, int val)
{ {
__asm__ __volatile__("stw%U0%X0 %1,%0; eieio" __asm__ __volatile__("stw%U0%X0 %1,%0; sync"
: "=m" (*addr) : "r" (val)); : "=m" (*addr) : "r" (val));
} }
...@@ -356,9 +356,9 @@ static inline void out_le64(volatile unsigned long *addr, unsigned long val) ...@@ -356,9 +356,9 @@ static inline void out_le64(volatile unsigned long *addr, unsigned long val)
: "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr)); : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
} }
static inline void out_be64(volatile unsigned long *addr, int val) static inline void out_be64(volatile unsigned long *addr, unsigned long val)
{ {
__asm__ __volatile__("std %1,0(%0); sync" : "=m" (*addr) : "r" (val)); __asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val));
} }
#ifndef CONFIG_PPC_ISERIES #ifndef CONFIG_PPC_ISERIES
......
...@@ -839,7 +839,7 @@ shrink_zone(struct zone *zone, struct scan_control *sc) ...@@ -839,7 +839,7 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
if (count >= SWAP_CLUSTER_MAX) { if (count >= SWAP_CLUSTER_MAX) {
atomic_set(&zone->nr_scan_inactive, 0); atomic_set(&zone->nr_scan_inactive, 0);
sc->nr_to_scan = count; sc->nr_to_scan = count;
return shrink_cache(zone, sc); shrink_cache(zone, sc);
} }
} }
......