Commit 2ec980e0 authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/network-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
Parents: 8ec72682 ca6c5d1f
......@@ -279,8 +279,8 @@ cc-option-yn = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \
# cc-version
# Usage gcc-ver := $(call cc-version $(CC))
cc-version = $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh \
$(if $(1), $(1), $(CC))
cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh \
$(if $(1), $(1), $(CC)))
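# Illustrative note: wrapping the call in $(shell ...) makes
# $(call cc-version) expand to the script's output (e.g. "0303" for
# gcc 3.3) instead of to the command text itself.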
# Look for make include files relative to root of kernel src
......
......@@ -38,6 +38,17 @@
#define PFX "longhaul: "
#define TYPE_LONGHAUL_V1 1
#define TYPE_LONGHAUL_V2 2
#define TYPE_POWERSAVER 3
#define CPU_SAMUEL 1
#define CPU_SAMUEL2 2
#define CPU_EZRA 3
#define CPU_EZRA_T 4
#define CPU_NEHEMIAH 5
static int cpu_model;
static unsigned int numscales=16, numvscales;
static unsigned int fsb;
static int minvid, maxvid;
......@@ -73,9 +84,23 @@ static int voltage_table[32];
static unsigned int highest_speed, lowest_speed; /* kHz */
static int longhaul_version;
static struct cpufreq_frequency_table *longhaul_table;
static char speedbuffer[8];
static char *print_speed(int speed)
{
if (speed > 1000) {
if (speed%1000 == 0)
sprintf (speedbuffer, "%dGHz", speed/1000);
else
sprintf (speedbuffer, "%d.%dGHz", speed/1000, (speed%1000)/100);
} else
sprintf (speedbuffer, "%dMHz", speed);
return speedbuffer;
}
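/* Illustrative behaviour: print_speed(1200) yields "1.2GHz" and
 * print_speed(800) yields "800MHz". The static speedbuffer makes this
 * non-reentrant, which is acceptable in this UP-only driver; the worst
 * case "1000MHz" (7 chars + NUL) exactly fits the 8-byte buffer. */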
static unsigned int calc_speed(int mult, int fsb)
static unsigned int calc_speed(int mult)
{
int khz;
khz = (mult/10)*fsb;
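/* Illustrative: mult is the multiplier in tenths (45 == 4.5x), so with
 * fsb == 100 the whole part alone gives (45/10)*100 = 400; the .5 step
 * and the MHz-to-kHz scaling are presumably handled in the elided
 * remainder of this function. */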
......@@ -92,7 +117,7 @@ static int longhaul_get_cpu_mult(void)
rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22;
if (longhaul_version==2 || longhaul_version==3) {
if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) {
if (lo & (1<<27))
invalue+=16;
}
......@@ -101,8 +126,21 @@ static int longhaul_get_cpu_mult(void)
static void do_powersaver(union msr_longhaul *longhaul,
unsigned int clock_ratio_index, int version)
unsigned int clock_ratio_index)
{
int version;
switch (cpu_model) {
case CPU_EZRA_T:
version = 3;
break;
case CPU_NEHEMIAH:
version = 0xf;
break;
default:
return;
}
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
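/* The 5-bit ratio index is split across the MSR: bits 0-3 go into
 * SoftBusRatio and bit 4 into SoftBusRatio4, which is what lets the
 * Powersaver parts address all 32 table entries (numscales = 32). */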
......@@ -125,7 +163,7 @@ static void do_powersaver(union msr_longhaul *longhaul,
* longhaul_set_cpu_frequency()
* @clock_ratio_index : bitpattern of the new multiplier.
*
* Sets a new clock ratio, and -if applicable- a new Front Side Bus
* Sets a new clock ratio.
*/
static void longhaul_setstate(unsigned int clock_ratio_index)
......@@ -134,22 +172,28 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
struct cpufreq_freqs freqs;
union msr_longhaul longhaul;
union msr_bcr2 bcr2;
static unsigned int old_ratio=-1;
if (old_ratio == clock_ratio_index)
return;
old_ratio = clock_ratio_index;
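/* old_ratio caches the last index we programmed so a repeated request
 * for the same ratio returns early; its static -1 initializer
 * (0xffffffff as unsigned int) can never match a real index, so the
 * first call always proceeds. */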
mult = clock_ratio[clock_ratio_index];
if (mult == -1)
return;
speed = calc_speed (mult, fsb);
speed = calc_speed(mult);
if ((speed > highest_speed) || (speed < lowest_speed))
return;
freqs.old = calc_speed (longhaul_get_cpu_mult(), fsb);
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
freqs.cpu = 0; /* longhaul.c is a UP-only driver */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
dprintk (KERN_INFO PFX "FSB:%d Mult:%d.%dx\n", fsb, mult/10, mult%10);
dprintk (KERN_INFO PFX "Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
switch (longhaul_version) {
......@@ -160,7 +204,8 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
* *NB* Until we get voltage scaling working v1 & v2 are the same code.
* Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C]
*/
case 1:
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
rdmsrl (MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
......@@ -180,26 +225,18 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
break;
/*
* Longhaul v3 (aka Powersaver). (Ezra-T [C5M])
* Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
* We can scale voltage with this too, but that's currently
* disabled until we come up with a decent 'match freq to voltage'
* algorithm.
* When we add voltage scaling, we will also need to do the
* voltage/freq setting in order depending on the direction
* of scaling (like we do in powernow-k7.c)
*/
case 2:
do_powersaver(&longhaul, clock_ratio_index, 3);
break;
/*
* Powersaver. (Nehemiah [C5N])
* As for Ezra-T, we don't do voltage yet.
* This can do FSB scaling too, but it has never been proven
* Nehemiah can do FSB scaling too, but this has never been proven
* to work in practice.
*/
case 3:
do_powersaver(&longhaul, clock_ratio_index, 0xf);
case TYPE_POWERSAVER:
do_powersaver(&longhaul, clock_ratio_index);
break;
}
......@@ -249,7 +286,6 @@ static int guess_fsb(void)
static int __init longhaul_get_ranges(void)
{
struct cpuinfo_x86 *c = cpu_data;
unsigned long invalue;
unsigned int multipliers[32]= {
50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
......@@ -261,22 +297,24 @@ static int __init longhaul_get_ranges(void)
unsigned int eblcr_fsb_table_v2[] = { 133, 100, -1, 66 };
switch (longhaul_version) {
case 1:
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
/* Ugh, Longhaul v1 didn't have the min/max MSRs.
Assume min=3.0x & max = whatever we booted at. */
minmult = 30;
maxmult = longhaul_get_cpu_mult();
rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
invalue = (lo & (1<<18|1<<19)) >>18;
if (c->x86_model==6)
if (cpu_model==CPU_SAMUEL || cpu_model==CPU_SAMUEL2)
fsb = eblcr_fsb_table_v1[invalue];
else
fsb = guess_fsb();
break;
case 2:
case TYPE_POWERSAVER:
/* Ezra-T */
if (cpu_model==CPU_EZRA_T) {
rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
invalue = longhaul.bits.MaxMHzBR;
if (longhaul.bits.MaxMHzBR4)
invalue += 16;
......@@ -287,17 +325,18 @@ static int __init longhaul_get_ranges(void)
minmult = 30;
else
minmult = multipliers[invalue];
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
}
case 3:
/* Nehemiah */
if (cpu_model==CPU_NEHEMIAH) {
rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
/*
* TODO: This code works, but raises a lot of questions.
* - Some Nehemiahs seem to have broken Min/MaxMHzBRs.
* We get around this by using a hardcoded multiplier of 5.0x
* We get around this by using a hardcoded multiplier of 4.0x
* for the minimum speed, and the speed we booted up at for the max.
* This is done in longhaul_get_cpu_mult() by reading the EBLCR register.
* - According to some VIA documentation EBLCR is only
......@@ -305,7 +344,7 @@ static int __init longhaul_get_ranges(void)
* We're possibly using something undocumented and unsupported,
* But it works, so we don't grumble.
*/
minmult=50;
minmult=40;
maxmult=longhaul_get_cpu_mult();
/* Starting with the 1.2GHz parts, there's a 200MHz bus. */
......@@ -315,8 +354,9 @@ static int __init longhaul_get_ranges(void)
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
}
}
dprintk (KERN_INFO PFX "MinMult=%d.%dx MaxMult=%d.%dx\n",
dprintk (KERN_INFO PFX "MinMult:%d.%dx MaxMult:%d.%dx\n",
minmult/10, minmult%10, maxmult/10, maxmult%10);
if (fsb == -1) {
......@@ -324,10 +364,11 @@ static int __init longhaul_get_ranges(void)
return -EINVAL;
}
highest_speed = calc_speed (maxmult, fsb);
lowest_speed = calc_speed (minmult,fsb);
dprintk (KERN_INFO PFX "FSB: %dMHz Lowestspeed=%dMHz Highestspeed=%dMHz\n",
fsb, lowest_speed/1000, highest_speed/1000);
highest_speed = calc_speed(maxmult);
lowest_speed = calc_speed(minmult);
dprintk (KERN_INFO PFX "FSB:%dMHz ", fsb);
dprintk ("Lowest speed:%s ", print_speed(lowest_speed/1000));
dprintk ("Highest speed:%s\n", print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n");
......@@ -350,7 +391,7 @@ static int __init longhaul_get_ranges(void)
continue;
if (ratio > maxmult || ratio < minmult)
continue;
longhaul_table[k].frequency = calc_speed (ratio, fsb);
longhaul_table[k].frequency = calc_speed(ratio);
longhaul_table[k].index = j;
k++;
}
......@@ -426,8 +467,7 @@ static int longhaul_verify(struct cpufreq_policy *policy)
static int longhaul_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
unsigned int target_freq, unsigned int relation)
{
unsigned int table_index = 0;
unsigned int new_clock_ratio = 0;
......@@ -442,13 +482,15 @@ static int longhaul_target(struct cpufreq_policy *policy,
return 0;
}
static unsigned int longhaul_get(unsigned int cpu)
{
if (cpu)
return 0;
return (calc_speed (longhaul_get_cpu_mult(), fsb));
return calc_speed(longhaul_get_cpu_mult());
}
static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = cpu_data;
......@@ -457,26 +499,31 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
switch (c->x86_model) {
case 6:
cpu_model = CPU_SAMUEL;
cpuname = "C3 'Samuel' [C5A]";
longhaul_version=1;
longhaul_version = TYPE_LONGHAUL_V1;
memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr));
break;
case 7: /* C5B / C5C */
longhaul_version=1;
case 7:
longhaul_version = TYPE_LONGHAUL_V1;
switch (c->x86_mask) {
case 0:
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
/* Note, this is not a typo, early Samuel2's had Samuel1 ratios. */
memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr));
break;
case 1 ... 15:
if (c->x86_mask < 8)
if (c->x86_mask < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
else
} else {
cpu_model = CPU_EZRA;
cpuname = "C3 'Ezra' [C5C]";
}
memcpy (clock_ratio, ezra_clock_ratio, sizeof(ezra_clock_ratio));
memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr));
break;
......@@ -484,15 +531,17 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
break;
case 8:
cpu_model = CPU_EZRA_T;
cpuname = "C3 'Ezra-T' [C5M]";
longhaul_version=2;
longhaul_version = TYPE_POWERSAVER;
numscales=32;
memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio));
memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr));
break;
case 9:
longhaul_version=3;
cpu_model = CPU_NEHEMIAH;
longhaul_version = TYPE_POWERSAVER;
numscales=32;
switch (c->x86_mask) {
case 0 ... 1:
......@@ -518,19 +567,28 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
break;
}
printk (KERN_INFO PFX "VIA %s CPU detected. Longhaul v%d supported.\n",
cpuname, longhaul_version);
printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
switch (longhaul_version) {
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
printk ("Longhaul v%d supported.\n", longhaul_version);
break;
case TYPE_POWERSAVER:
printk ("Powersaver supported.\n");
break;
}
ret = longhaul_get_ranges();
if (ret != 0)
return ret;
if ((longhaul_version==2) && (dont_scale_voltage==0))
if ((longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) &&
(dont_scale_voltage==0))
longhaul_setup_voltagescaling();
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = calc_speed (longhaul_get_cpu_mult(), fsb);
policy->cur = calc_speed(longhaul_get_cpu_mult());
ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
if (ret)
......@@ -563,6 +621,7 @@ static struct cpufreq_driver longhaul_driver = {
.attr = longhaul_attr,
};
static int __init longhaul_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
......@@ -580,16 +639,17 @@ static int __init longhaul_init(void)
return -ENODEV;
}
static void __exit longhaul_exit(void)
{
int i=0;
unsigned int new_clock_ratio;
while (clock_ratio[i] != maxmult)
i++;
new_clock_ratio = longhaul_table[i].index & 0xFF;
longhaul_setstate(new_clock_ratio);
for (i=0; i < numscales; i++) {
if (clock_ratio[i] == maxmult) {
longhaul_setstate(i);
break;
}
}
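/* The replacement loop is bounded by numscales, so a maxmult value that
 * never appears in clock_ratio[] can no longer walk past the end of the
 * table the way the old open-ended while-loop could. */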
cpufreq_unregister_driver(&longhaul_driver);
kfree(longhaul_table);
......
......@@ -142,7 +142,7 @@ static void __init pcibios_allocate_resources(int pass)
DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
r->start, r->end, r->flags, disabled, pass);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
if (!pr || insert_resource(pr, r) < 0) {
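/* insert_resource(), unlike request_resource(), accepts a region that
 * fully contains already-claimed siblings and reparents them; reaching
 * this branch now means a genuine partial conflict or missing parent. */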
printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
/* We'll assign a new address later */
r->end -= r->start;
......
......@@ -1037,6 +1037,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
* interrupts if necessary.
*/
beq .ret_from_except_lite
/* For a hash failure, we don't bother re-enabling interrupts */
ble- 12f
/*
* hash_page couldn't handle it, set soft interrupt enable back
* to what it was before the trap. Note that .local_irq_restore
......@@ -1047,6 +1050,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
b 11f
#else
beq fast_exception_return /* Return from exception on success */
ble- 12f /* Failure return from hash_page */
/* fall through */
#endif
......@@ -1066,6 +1071,15 @@ _GLOBAL(handle_page_fault)
bl .bad_page_fault
b .ret_from_except
/* We have a page fault that hash_page could handle but HV refused
* the PTE insertion
*/
12: bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
bl .low_hash_fault
b .ret_from_except
/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
bl .ste_allocate /* try to insert stab entry */
......
......@@ -377,7 +377,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, lhpte.dw0.dword0,
lhpte.dw1.dword1, &slot, &dummy0, &dummy1);
if (lpar_rc == H_PTEG_Full)
if (unlikely(lpar_rc == H_PTEG_Full))
return -1;
/*
......@@ -385,7 +385,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
* will fail. However we must catch the failure in hash_page
* or we will loop forever, so return -2 in this case.
*/
if (lpar_rc != H_Success)
if (unlikely(lpar_rc != H_Success))
return -2;
/* Because of iSeries, we have to pass down the secondary
......@@ -415,9 +415,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
if (lpar_rc == H_Success)
return i;
if (lpar_rc != H_Not_Found)
panic("Bad return code from pte remove rc = %lx\n",
lpar_rc);
BUG_ON(lpar_rc != H_Not_Found);
slot_offset++;
slot_offset &= 0x7;
......@@ -447,8 +445,7 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
if (lpar_rc == H_Not_Found)
return -1;
if (lpar_rc != H_Success)
panic("bad return code from pte protect rc = %lx\n", lpar_rc);
BUG_ON(lpar_rc != H_Success);
return 0;
}
......@@ -467,8 +464,7 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
if (lpar_rc != H_Success)
panic("Error on pte read in get_hpte0 rc = %lx\n", lpar_rc);
BUG_ON(lpar_rc != H_Success);
return dword0;
}
......@@ -519,15 +515,12 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
vpn = va >> PAGE_SHIFT;
slot = pSeries_lpar_hpte_find(vpn);
if (slot == -1)
panic("updateboltedpp: Could not find page to bolt\n");
BUG_ON(slot == -1);
flags = newpp & 3;
lpar_rc = plpar_pte_protect(flags, slot, 0);
if (lpar_rc != H_Success)
panic("Bad return code from pte bolted protect rc = %lx\n",
lpar_rc);
BUG_ON(lpar_rc != H_Success);
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
......@@ -546,8 +539,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
if (lpar_rc == H_Not_Found)
return;
if (lpar_rc != H_Success)
panic("Bad return code from invalidate rc = %lx\n", lpar_rc);
BUG_ON(lpar_rc != H_Success);
}
/*
......
......@@ -278,6 +278,10 @@ htab_wrong_access:
b bail
htab_pte_insert_failure:
b .htab_insert_failure
/* Bail out restoring old PTE */
ld r6,STK_PARM(r6)(r1)
std r31,0(r6)
li r3,-1
b bail
......@@ -28,6 +28,7 @@
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <asm/ppcdebug.h>
#include <asm/processor.h>
......@@ -236,14 +237,11 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
return pp;
}
/*
* Called by asm hashtable.S in case of critical insert failure
/* Result code is:
* 0 - handled
* 1 - normal page fault
* -1 - critical hash insertion error
*/
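/* Illustrative tie-in with the assembler hunks above: after the
 * hash_page call, beq (result == 0) takes the fast success exit,
 * ble- 12f routes the negative critical-failure case to
 * low_hash_fault(), and result 1 falls through to the normal
 * page-fault path. */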
void htab_insert_failure(void)
{
panic("hash_page: pte_insert failed\n");
}
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
void *pgdir;
......@@ -371,6 +369,25 @@ static inline void make_bl(unsigned int *insn_addr, void *func)
(unsigned long)insn_addr);
}
/*
* low_hash_fault is called when the low-level hash code failed
* to insert a PTE due to a hypervisor error
*/
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
if (user_mode(regs)) {
siginfo_t info;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)address;
force_sig_info(SIGBUS, &info, current);
return;
}
bad_page_fault(regs, address, SIGBUS);
}
void __init htab_finish_init(void)
{
extern unsigned int *htab_call_hpte_insert1;
......
......@@ -69,6 +69,21 @@ config CPU_FREQ_GOV_USERSPACE
If in doubt, say Y.
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
depends on CPU_FREQ
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
The governor does periodic polling and changes frequency based
on CPU utilization.
Support for this governor depends on the CPU's capability to
do fast frequency switching (i.e., very low latency frequency
transitions).
For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
config CPU_FREQ_24_API
bool "/proc/sys/cpu/ interface (2.4. / OLD)"
depends on CPU_FREQ && SYSCTL && CPU_FREQ_GOV_USERSPACE
......
......@@ -5,6 +5,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
......
/*
* drivers/cpufreq/cpufreq_ondemand.c
*
* Copyright (C) 2001 Russell King
* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
* Jun Nakajima <jun.nakajima@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
/*
* dbs is used in this file as a short form for demand-based switching.
* It helps to keep variable names short and simple.
*/
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define MIN_FREQUENCY_UP_THRESHOLD (0)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
/*
* The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
* latency of the processor. The governor will work on any processor with
* transition latency <= 10 ms, using an appropriate sampling rate.
* For CPUs with transition latency > 10 ms (mostly drivers with
* CPUFREQ_ETERNAL), this governor will not work.
* All times here are in microseconds.
*/
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define DEF_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
#define sampling_rate_in_HZ(x) (((x) * HZ) / (1000 * 1000))
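/*
 * Worked example (illustrative): a CPU with a 1 ms (1,000,000 ns)
 * transition latency gets def_sampling_rate = (1000000/1000) * 1000 =
 * 1,000,000 us, i.e. one sample per second; sampling_rate_in_HZ() then
 * converts that into HZ jiffies for schedule_delayed_work().
 */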
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
unsigned int prev_cpu_idle_up;
unsigned int prev_cpu_idle_down;
unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
static DECLARE_MUTEX (dbs_sem);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int down_threshold;
};
struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
/************************** sysfs interface ************************/
static ssize_t show_current_freq(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", policy->cur);
}
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name) \
static struct freq_attr _name = { \
.attr = { .name = __stringify(_name), .mode = 0444 }, \
.show = show_##_name, \
}
define_one_ro(current_freq);
define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy *unused, char *buf) \
{ \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 )
goto out;
dbs_tuners_ins.sampling_down_factor = input;
out:
up(&dbs_sem);
return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE)
goto out;
dbs_tuners_ins.sampling_rate = input;
out:
up(&dbs_sem);
return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD ||
input <= dbs_tuners_ins.down_threshold)
goto out;
dbs_tuners_ins.up_threshold = input;
out:
up(&dbs_sem);
return count;
}
static ssize_t store_down_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
input < MIN_FREQUENCY_DOWN_THRESHOLD ||
input >= dbs_tuners_ins.up_threshold)
goto out;
dbs_tuners_ins.down_threshold = input;
out:
up(&dbs_sem);
return count;
}
#define define_one_rw(_name) \
static struct freq_attr _name = { \
.attr = { .name = __stringify(_name), .mode = 0644 }, \
.show = show_##_name, \
.store = store_##_name, \
}
define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
static struct attribute * dbs_attributes[] = {
&current_freq.attr,
&sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
&down_threshold.attr,
NULL
};
static struct attribute_group dbs_attr_group = {
.attrs = dbs_attributes,
.name = "ondemand",
};
/************************** sysfs end ************************/
static void dbs_check_cpu(int cpu)
{
unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
unsigned int freq_down_step;
unsigned int freq_down_sampling_rate;
static int down_skip[NR_CPUS];
struct cpu_dbs_info_s *this_dbs_info;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
if (!this_dbs_info->enable)
return;
/*
* The default safe range is 20% to 80%
* Every sampling_rate, we check
* - If current idle time is less than 20%, then we try to
* increase frequency
* Every sampling_rate*sampling_down_factor, we check
* - If current idle time is more than 80%, then we try to
* decrease frequency
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
* 5% of max_frequency
*/
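/*
 * Worked example (illustrative, assuming HZ=1000 and a sampling_rate of
 * 500000 us): sampling_rate_in_HZ() gives a 500-tick window, so with
 * up_threshold=80, up_idle_ticks = (100-80)*500/100 = 100; fewer than
 * 100 idle ticks in the window triggers a jump straight to policy->max.
 */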
/* Check for frequency increase */
idle_ticks = kstat_cpu(cpu).cpustat.idle -
this_dbs_info->prev_cpu_idle_up;
this_dbs_info->prev_cpu_idle_up = kstat_cpu(cpu).cpustat.idle;
up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate) / 100;
if (idle_ticks < up_idle_ticks) {
__cpufreq_driver_target(this_dbs_info->cur_policy,
this_dbs_info->cur_policy->max,
CPUFREQ_RELATION_H);
down_skip[cpu] = 0;
this_dbs_info->prev_cpu_idle_down = kstat_cpu(cpu).cpustat.idle;
return;
}
/* Check for frequency decrease */
down_skip[cpu]++;
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
idle_ticks = kstat_cpu(cpu).cpustat.idle -
this_dbs_info->prev_cpu_idle_down;
down_skip[cpu] = 0;
this_dbs_info->prev_cpu_idle_down = kstat_cpu(cpu).cpustat.idle;
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
sampling_rate_in_HZ(freq_down_sampling_rate) / 100;
if (idle_ticks > down_idle_ticks ) {
freq_down_step = (5 * this_dbs_info->cur_policy->max) / 100;
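/* e.g. a 2,000,000 kHz (2 GHz) policy->max gives a 100,000 kHz (100 MHz)
 * downward step, matching the "minimum steps of 5% of max_frequency"
 * rule described above. */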
__cpufreq_driver_target(this_dbs_info->cur_policy,
this_dbs_info->cur_policy->cur - freq_down_step,
CPUFREQ_RELATION_H);
return;
}
}
static void do_dbs_timer(void *data)
{
int i;
down(&dbs_sem);
for (i = 0; i < NR_CPUS; i++)
if (cpu_online(i))
dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
up(&dbs_sem);
}
static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_work(&dbs_work);
return;
}
static inline void dbs_timer_exit(void)
{
cancel_delayed_work(&dbs_work);
return;
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
unsigned int cpu = policy->cpu;
struct cpu_dbs_info_s *this_dbs_info;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
if ((!cpu_online(cpu)) ||
(!policy->cur))
return -EINVAL;
if (policy->cpuinfo.transition_latency >
(TRANSITION_LATENCY_LIMIT * 1000))
return -EINVAL;
if (this_dbs_info->enable) /* Already enabled */
break;
down(&dbs_sem);
this_dbs_info->cur_policy = policy;
this_dbs_info->prev_cpu_idle_up =
kstat_cpu(cpu).cpustat.idle;
this_dbs_info->prev_cpu_idle_down =
kstat_cpu(cpu).cpustat.idle;
this_dbs_info->enable = 1;
sysfs_create_group(&policy->kobj, &dbs_attr_group);
dbs_enable++;
/*
* Start the timer and schedule the work when this
* governor is used for the first time
*/
if (dbs_enable == 1) {
/* policy latency is in ns. Convert it to us first */
def_sampling_rate = (policy->cpuinfo.transition_latency / 1000) *
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
dbs_tuners_ins.sampling_rate = def_sampling_rate;
dbs_timer_init();
}
up(&dbs_sem);
break;
case CPUFREQ_GOV_STOP:
down(&dbs_sem);
this_dbs_info->enable = 0;
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
dbs_enable--;
/*
* Stop the timer and cancel the work when this
* governor is no longer in use
*/
if (dbs_enable == 0)
dbs_timer_exit();
up(&dbs_sem);
break;
case CPUFREQ_GOV_LIMITS:
down(&dbs_sem);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->max, CPUFREQ_RELATION_H);
else if (policy->min > this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
up(&dbs_sem);
break;
}
return 0;
}
struct cpufreq_governor cpufreq_gov_dbs = {
.name = "ondemand",
.governor = cpufreq_governor_dbs,
.owner = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_dbs);
static int __init cpufreq_gov_dbs_init(void)
{
return cpufreq_register_governor(&cpufreq_gov_dbs);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
/* Make sure that the scheduled work is indeed not running */
flush_scheduled_work();
cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors");
MODULE_LICENSE ("GPL");
module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);
......@@ -29,6 +29,7 @@
#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_ATMEL 0x001f
#define MANUFACTURER_FUJITSU 0x0004
#define MANUFACTURER_HYUNDAI 0x00AD
#define MANUFACTURER_INTEL 0x0089
#define MANUFACTURER_MACRONIX 0x00C2
#define MANUFACTURER_PMC 0x009D
......@@ -56,6 +57,7 @@
#define AM29F040 0x00A4
#define AM29LV040B 0x004F
#define AM29F032B 0x0041
#define AM29F002T 0x00B0
/* Atmel */
#define AT49BV512 0x0003
......@@ -77,6 +79,8 @@
#define MBM29LV400TC 0x22B9
#define MBM29LV400BC 0x22BA
/* Hyundai */
#define HY29F002T 0x00B0
/* Intel */
#define I28F004B3T 0x00d4
......@@ -106,6 +110,7 @@
#define MX29LV160T 0x22C4
#define MX29LV160B 0x2249
#define MX29F016 0x00AD
#define MX29F002T 0x00B0
#define MX29F004T 0x0045
#define MX29F004B 0x0046
......@@ -506,6 +511,17 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {
ERASEINFO(0x10000,8),
}
}, {
mfr_id: MANUFACTURER_AMD,
dev_id: AM29F002T,
name: "AMD AM29F002T",
DevSize: SIZE_256KiB,
NumEraseRegions: 4,
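/* Illustrative sanity check: 3*64KiB + 32KiB + 2*8KiB + 16KiB = 256KiB,
 * matching DevSize = SIZE_256KiB. */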
regions: {ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV512,
......@@ -751,6 +767,17 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
mfr_id: MANUFACTURER_HYUNDAI,
dev_id: HY29F002T,
name: "Hyundai HY29F002T",
DevSize: SIZE_256KiB,
NumEraseRegions: 4,
regions: {ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F004B3B,
......@@ -1134,6 +1161,17 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7),
}
}, {
mfr_id: MANUFACTURER_MACRONIX,
dev_id: MX29F002T,
name: "Macronix MX29F002T",
DevSize: SIZE_256KiB,
NumEraseRegions: 4,
regions: {ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL002,
......
......@@ -593,6 +593,7 @@ struct rtl8139_private {
int time_to_die;
struct mii_if_info mii;
unsigned int regs_len;
unsigned long fifo_copy_timeout;
};
MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
......@@ -1927,6 +1928,24 @@ static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
}
#endif
static void rtl8139_isr_ack(struct rtl8139_private *tp)
{
void *ioaddr = tp->mmio_addr;
u16 status;
status = RTL_R16 (IntrStatus) & RxAckBits;
/* Clear out errors and receive interrupts */
if (likely(status != 0)) {
if (unlikely(status & (RxFIFOOver | RxOverflow))) {
tp->stats.rx_errors++;
if (status & RxFIFOOver)
tp->stats.rx_fifo_errors++;
}
RTL_W16_F (IntrStatus, RxAckBits);
}
}
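/* Factored out of the rx loop so the same ack sequence can run both per
 * received packet and, in the "done" path below, when nothing was
 * received or an early-rx copy is still in flight. */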
static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
int budget)
{
......@@ -1934,9 +1953,10 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
int received = 0;
unsigned char *rx_ring = tp->rx_ring;
unsigned int cur_rx = tp->cur_rx;
unsigned int rx_size = 0;
DPRINTK ("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x,"
" free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
" free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx,
RTL_R16 (RxBufAddr),
RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
......@@ -1944,10 +1964,8 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
&& (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
u32 ring_offset = cur_rx % RX_BUF_LEN;
u32 rx_status;
unsigned int rx_size;
unsigned int pkt_size;
struct sk_buff *skb;
u16 status;
rmb();
......@@ -1976,10 +1994,24 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
* since EarlyRx is disabled.
*/
if (unlikely(rx_size == 0xfff0)) {
if (!tp->fifo_copy_timeout)
tp->fifo_copy_timeout = jiffies + 2;
else if (time_after(jiffies, tp->fifo_copy_timeout)) {
DPRINTK ("%s: hung FIFO. Reset.", dev->name);
rx_size = 0;
goto no_early_rx;
}
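/* rx_size == 0xfff0 means the chip is still copying the frame out of
 * its FIFO ("early rx"); tolerate that for up to 2 jiffies before
 * declaring the FIFO hung and bailing out via no_early_rx. */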
if (netif_msg_intr(tp)) {
printk(KERN_DEBUG "%s: fifo copy in progress.",
dev->name);
}
tp->xstats.early_rx++;
goto done;
break;
}
no_early_rx:
tp->fifo_copy_timeout = 0;
/* If Rx err or invalid rx_size/rx_status received
* (which happens if we get lost in the ring),
* Rx process gets reset, so we abort any further
......@@ -1989,7 +2021,8 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
(rx_size < 8) ||
(!(rx_status & RxStatusOK)))) {
rtl8139_rx_err (rx_status, dev, tp, ioaddr);
return -1;
received = -1;
goto out;
}
/* Malloc up new buffer, compatible with net-2e. */
......@@ -2025,19 +2058,11 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
RTL_W16 (RxBufPtr, (u16) (cur_rx - 16));
/* Clear out errors and receive interrupts */
status = RTL_R16 (IntrStatus) & RxAckBits;
if (likely(status != 0)) {
if (unlikely(status & (RxFIFOOver | RxOverflow))) {
tp->stats.rx_errors++;
if (status & RxFIFOOver)
tp->stats.rx_fifo_errors++;
}
RTL_W16_F (IntrStatus, RxAckBits);
}
rtl8139_isr_ack(tp);
}
done:
if (unlikely(!received || rx_size == 0xfff0))
rtl8139_isr_ack(tp);
#if RTL8139_DEBUG > 1
DPRINTK ("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
......@@ -2047,6 +2072,15 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
#endif
tp->cur_rx = cur_rx;
/*
* The receive buffer should be mostly empty.
* Tell NAPI to reenable the Rx irq.
*/
if (tp->fifo_copy_timeout)
received = budget;
out:
return received;
}
......
......@@ -1752,7 +1752,7 @@ config VIA_VELOCITY
If you have a VIA "Velocity" based network card say Y here.
To compile this driver as a module, choose M here. The module
will be called via-rhine.
will be called via-velocity.
config LAN_SAA9730
bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
......@@ -2047,7 +2047,19 @@ config R8169
config R8169_NAPI
bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)"
depends on R8169 && EXPERIMENTAL
help
NAPI is a new driver API designed to reduce CPU and interrupt load
when the driver is receiving lots of packets from the card. It is
still somewhat experimental and thus not yet enabled by default.
If your estimated Rx load is 10kpps or more, or if the card will be
deployed on potentially unfriendly networks (e.g. in a firewall),
then say Y here.
See <file:Documentation/networking/NAPI_HOWTO.txt> for more
information.
If in doubt, say N.
config SK98LIN
tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
......
......@@ -92,7 +92,7 @@ enum dl2x_offsets {
EepromCtrl = 0x4a,
ExpromAddr = 0x4c,
Exprodata = 0x50,
WakeEvent0x51,
WakeEvent = 0x51,
CountDown = 0x54,
IntStatusAck = 0x5a,
IntEnable = 0x5c,
......
......@@ -128,8 +128,8 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static inline void e1000_irq_disable(struct e1000_adapter *adapter);
static inline void e1000_irq_enable(struct e1000_adapter *adapter);
static void e1000_irq_disable(struct e1000_adapter *adapter);
static void e1000_irq_enable(struct e1000_adapter *adapter);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_NAPI
......@@ -146,7 +146,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
void set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static inline void e1000_rx_checksum(struct e1000_adapter *adapter,
static void e1000_rx_checksum(struct e1000_adapter *adapter,
struct e1000_rx_desc *rx_desc,
struct sk_buff *skb);
static void e1000_tx_timeout(struct net_device *dev);
......@@ -2077,7 +2077,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
* @adapter: board private structure
**/
static inline void
static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
......@@ -2091,7 +2091,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
* @adapter: board private structure
**/
static inline void
static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
......@@ -2593,7 +2593,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @sk_buff: socket buffer with received data
**/
static inline void
static void
e1000_rx_checksum(struct e1000_adapter *adapter,
struct e1000_rx_desc *rx_desc,
struct sk_buff *skb)
......
......@@ -307,8 +307,8 @@ static int gfar_probe(struct ocp_device *ocpdev)
/* Print out the device info */
printk(KERN_INFO DEVICE_NAME, dev->name);
for (idx = 0; idx < 6; idx++)
printk(KERN_INFO "%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
printk(KERN_INFO "\n");
printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
printk("\n");
/* Even more device info helps when determining which kernel */
/* provided which set of benchmarks. Since this is global for all */
......
......@@ -559,7 +559,7 @@ static void hamachi_tx_timeout(struct net_device *dev);
static void hamachi_init_ring(struct net_device *dev);
static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline int hamachi_rx(struct net_device *dev);
static int hamachi_rx(struct net_device *dev);
static inline int hamachi_tx(struct net_device *dev);
static void hamachi_error(struct net_device *dev, int intr_status);
static int hamachi_close(struct net_device *dev);
......
......@@ -246,8 +246,14 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs);
static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);
static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(void *arg);
......@@ -255,12 +261,6 @@ static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);
static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);
/* Initialization variables */
......@@ -945,42 +945,115 @@ static int scc_set_mac_address(struct net_device *dev, void *sa) {
}
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs) {
struct scc_info *info = dev_id;
static inline void tx_on(struct scc_priv *priv) {
int i, n;
unsigned long flags;
spin_lock(info->priv[0].register_lock);
/* At this point interrupts are enabled, and the interrupt under service
is already acknowledged, but masked off.
if (priv->param.dma >= 0) {
n = (priv->chip == Z85230) ? 3 : 1;
/* Program DMA controller */
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
set_dma_addr(priv->param.dma, (int) priv->tx_buf[priv->tx_tail]+n);
set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail]-n);
release_dma_lock(flags);
/* Enable TX underrun interrupt */
write_scc(priv, R15, TxUIE);
/* Configure DREQ */
if (priv->type == TYPE_TWIN)
outb((priv->param.dma == 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
priv->card_base + TWIN_DMA_CFG);
else
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | WT_RDY_ENAB);
/* Write first byte(s) */
spin_lock_irqsave(priv->register_lock, flags);
for (i = 0; i < n; i++)
write_scc_data(priv, priv->tx_buf[priv->tx_tail][i], 1);
enable_dma(priv->param.dma);
spin_unlock_irqrestore(priv->register_lock, flags);
} else {
write_scc(priv, R15, TxUIE);
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
tx_isr(priv);
}
/* Reset EOM latch if we do not have the AUTOEOM feature */
if (priv->chip == Z8530) write_scc(priv, R0, RES_EOM_L);
}
Interrupt processing: We loop until we know that the IRQ line is
low. If another positive edge occurs afterwards during the ISR,
another interrupt will be triggered by the interrupt controller
as soon as the IRQ level is enabled again (see asm/irq.h).
Bottom-half handlers will be processed after scc_isr(). This is
important, since we only have small ringbuffers and want new data
to be fetched/delivered immediately. */
static inline void rx_on(struct scc_priv *priv) {
unsigned long flags;
if (info->priv[0].type == TYPE_TWIN) {
int is, card_base = info->priv[0].card_base;
while ((is = ~inb(card_base + TWIN_INT_REG)) &
TWIN_INT_MSK) {
if (is & TWIN_SCC_MSK) {
z8530_isr(info);
} else if (is & TWIN_TMR1_MSK) {
inb(card_base + TWIN_CLR_TMR1);
tm_isr(&info->priv[0]);
/* Clear RX FIFO */
while (read_scc(priv, R0) & Rx_CH_AV) read_scc_data(priv);
priv->rx_over = 0;
if (priv->param.dma >= 0) {
/* Program DMA controller */
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_READ);
set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
enable_dma(priv->param.dma);
/* Configure PackeTwin DMA */
if (priv->type == TYPE_TWIN) {
outb((priv->param.dma == 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
priv->card_base + TWIN_DMA_CFG);
}
/* Sp. cond. intr. only, ext int enable, RX DMA enable */
write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
} else {
inb(card_base + TWIN_CLR_TMR2);
tm_isr(&info->priv[1]);
/* Reset current frame */
priv->rx_ptr = 0;
/* Intr. on all Rx characters and Sp. cond., ext int enable */
write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
WT_FN_RDYFN);
}
write_scc(priv, R0, ERR_RES);
write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
static inline void rx_off(struct scc_priv *priv) {
/* Disable receiver */
write_scc(priv, R3, Rx8);
/* Disable DREQ / RX interrupt */
if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
outb(0, priv->card_base + TWIN_DMA_CFG);
else
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
/* Disable DMA */
if (priv->param.dma >= 0) disable_dma(priv->param.dma);
}
static void start_timer(struct scc_priv *priv, int t, int r15) {
unsigned long flags;
outb(priv->tmr_mode, priv->tmr_ctrl);
if (t == 0) {
tm_isr(priv);
} else if (t > 0) {
save_flags(flags);
cli();
outb(t & 0xFF, priv->tmr_cnt);
outb((t >> 8) & 0xFF, priv->tmr_cnt);
if (priv->type != TYPE_TWIN) {
write_scc(priv, R15, r15 | CTSIE);
priv->rr0 |= CTS;
}
restore_flags(flags);
}
} else z8530_isr(info);
spin_unlock(info->priv[0].register_lock);
return IRQ_HANDLED;
}
static inline unsigned char random(void) {
/* See "Numerical Recipes in C", second edition, p. 284 */
rand = rand * 1664525L + 1013904223L;
return (unsigned char) (rand >> 24);
}
static inline void z8530_isr(struct scc_info *info) {
int is, i = 100;
......@@ -1009,6 +1082,42 @@ static inline void z8530_isr(struct scc_info *info) {
}
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs) {
struct scc_info *info = dev_id;
spin_lock(info->priv[0].register_lock);
/* At this point interrupts are enabled, and the interrupt under service
is already acknowledged, but masked off.
Interrupt processing: We loop until we know that the IRQ line is
low. If another positive edge occurs afterwards during the ISR,
another interrupt will be triggered by the interrupt controller
as soon as the IRQ level is enabled again (see asm/irq.h).
Bottom-half handlers will be processed after scc_isr(). This is
important, since we only have small ringbuffers and want new data
to be fetched/delivered immediately. */
if (info->priv[0].type == TYPE_TWIN) {
int is, card_base = info->priv[0].card_base;
while ((is = ~inb(card_base + TWIN_INT_REG)) &
TWIN_INT_MSK) {
if (is & TWIN_SCC_MSK) {
z8530_isr(info);
} else if (is & TWIN_TMR1_MSK) {
inb(card_base + TWIN_CLR_TMR1);
tm_isr(&info->priv[0]);
} else {
inb(card_base + TWIN_CLR_TMR2);
tm_isr(&info->priv[1]);
}
}
} else z8530_isr(info);
spin_unlock(info->priv[0].register_lock);
return IRQ_HANDLED;
}
static void rx_isr(struct scc_priv *priv) {
if (priv->param.dma >= 0) {
/* Check special condition and perform error reset. See 2.4.7.5. */
......@@ -1292,114 +1401,3 @@ static void tm_isr(struct scc_priv *priv) {
break;
}
}
static inline void tx_on(struct scc_priv *priv) {
int i, n;
unsigned long flags;
if (priv->param.dma >= 0) {
n = (priv->chip == Z85230) ? 3 : 1;
/* Program DMA controller */
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
set_dma_addr(priv->param.dma, (int) priv->tx_buf[priv->tx_tail]+n);
set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail]-n);
release_dma_lock(flags);
/* Enable TX underrun interrupt */
write_scc(priv, R15, TxUIE);
/* Configure DREQ */
if (priv->type == TYPE_TWIN)
outb((priv->param.dma == 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
priv->card_base + TWIN_DMA_CFG);
else
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | WT_RDY_ENAB);
/* Write first byte(s) */
spin_lock_irqsave(priv->register_lock, flags);
for (i = 0; i < n; i++)
write_scc_data(priv, priv->tx_buf[priv->tx_tail][i], 1);
enable_dma(priv->param.dma);
spin_unlock_irqrestore(priv->register_lock, flags);
} else {
write_scc(priv, R15, TxUIE);
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
tx_isr(priv);
}
/* Reset EOM latch if we do not have the AUTOEOM feature */
if (priv->chip == Z8530) write_scc(priv, R0, RES_EOM_L);
}
static inline void rx_on(struct scc_priv *priv) {
unsigned long flags;
/* Clear RX FIFO */
while (read_scc(priv, R0) & Rx_CH_AV) read_scc_data(priv);
priv->rx_over = 0;
if (priv->param.dma >= 0) {
/* Program DMA controller */
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_READ);
set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
enable_dma(priv->param.dma);
/* Configure PackeTwin DMA */
if (priv->type == TYPE_TWIN) {
outb((priv->param.dma == 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
priv->card_base + TWIN_DMA_CFG);
}
/* Sp. cond. intr. only, ext int enable, RX DMA enable */
write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
} else {
/* Reset current frame */
priv->rx_ptr = 0;
/* Intr. on all Rx characters and Sp. cond., ext int enable */
write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
WT_FN_RDYFN);
}
write_scc(priv, R0, ERR_RES);
write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
static inline void rx_off(struct scc_priv *priv) {
/* Disable receiver */
write_scc(priv, R3, Rx8);
/* Disable DREQ / RX interrupt */
if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
outb(0, priv->card_base + TWIN_DMA_CFG);
else
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
/* Disable DMA */
if (priv->param.dma >= 0) disable_dma(priv->param.dma);
}
static void start_timer(struct scc_priv *priv, int t, int r15) {
unsigned long flags;
outb(priv->tmr_mode, priv->tmr_ctrl);
if (t == 0) {
tm_isr(priv);
} else if (t > 0) {
save_flags(flags);
cli();
outb(t & 0xFF, priv->tmr_cnt);
outb((t >> 8) & 0xFF, priv->tmr_cnt);
if (priv->type != TYPE_TWIN) {
write_scc(priv, R15, r15 | CTSIE);
priv->rr0 |= CTS;
}
restore_flags(flags);
}
}
static inline unsigned char random(void) {
/* See "Numerical Recipes in C", second edition, p. 284 */
rand = rand * 1664525L + 1013904223L;
return (unsigned char) (rand >> 24);
}
......@@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
/* Local Function Prototypes */
static inline void ixgb_irq_disable(struct ixgb_adapter *adapter);
static inline void ixgb_irq_enable(struct ixgb_adapter *adapter);
int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);
......@@ -82,10 +84,11 @@ static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static void ixgb_update_stats(struct ixgb_adapter *adapter);
static inline void ixgb_irq_disable(struct ixgb_adapter *adapter);
static inline void ixgb_irq_enable(struct ixgb_adapter *adapter);
static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
static inline void ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc,
struct sk_buff *skb);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct net_device *netdev, int *budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
......@@ -95,9 +98,6 @@ static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
#endif
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
static int ixgb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static inline void ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc,
struct sk_buff *skb);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct net_device *dev);
static void ixgb_vlan_rx_register(struct net_device *netdev,
......@@ -185,6 +185,34 @@ static void __exit ixgb_exit_module(void)
module_exit(ixgb_exit_module);
/**
* ixgb_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static inline void ixgb_irq_disable(struct ixgb_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
IXGB_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
/**
* ixgb_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static inline void ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if (atomic_dec_and_test(&adapter->irq_sem)) {
IXGB_WRITE_REG(&adapter->hw, IMS,
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_RXO | IXGB_INT_LSC);
IXGB_WRITE_FLUSH(&adapter->hw);
}
}
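/* Moved ahead of ixgb_up() and the interrupt paths, presumably so the
 * inline bodies are visible at their first call sites; the prototypes
 * above remain for readers scanning the local function list. */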
int ixgb_up(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
......@@ -1550,34 +1578,6 @@ static void ixgb_update_stats(struct ixgb_adapter *adapter)
adapter->net_stats.tx_window_errors = 0;
}
/**
* ixgb_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static inline void ixgb_irq_disable(struct ixgb_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
IXGB_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
/**
* ixgb_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static inline void ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if (atomic_dec_and_test(&adapter->irq_sem)) {
IXGB_WRITE_REG(&adapter->hw, IMS,
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_RXO | IXGB_INT_LSC);
IXGB_WRITE_FLUSH(&adapter->hw);
}
}
#define IXGB_MAX_INTR 10
/**
* ixgb_intr - Interrupt Handler
......@@ -1729,6 +1729,39 @@ static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
return cleaned;
}
/**
* ixgb_rx_checksum - Receive Checksum Offload for 82597.
* @adapter: board private structure
* @rx_desc: receive descriptor
* @sk_buff: socket buffer with received data
**/
static inline void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc, struct sk_buff *skb)
{
/* Ignore Checksum bit is set OR
* TCP Checksum has not been calculated
*/
if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
(!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
/* At this point we know the hardware did the TCP checksum */
/* now look at the TCP checksum error bit */
if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
/* let the stack verify checksum errors */
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_rx_error++;
} else {
/* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_rx_good++;
}
}
/**
* ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure
......@@ -1955,39 +1988,6 @@ static int ixgb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return 0;
}
/**
* ixgb_rx_checksum - Receive Checksum Offload for 82597.
* @adapter: board private structure
* @rx_desc: receive descriptor
* @sk_buff: socket buffer with received data
**/
static inline void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc, struct sk_buff *skb)
{
/* Ignore Checksum bit is set OR
* TCP Checksum has not been calculated
*/
if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
(!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
/* At this point we know the hardware did the TCP checksum */
/* now look at the TCP checksum error bit */
if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
/* let the stack verify checksum errors */
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_rx_error++;
} else {
/* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_rx_good++;
}
}
/**
* ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
*
......
......@@ -1139,6 +1139,49 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
return IRQ_HANDLED;
}
static inline void rr_raz_tx(struct rr_private *rrpriv,
struct net_device *dev)
{
int i;
for (i = 0; i < TX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->tx_skbuff[i];
if (skb) {
struct tx_desc *desc = &(rrpriv->tx_ring[i]);
pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
skb->len, PCI_DMA_TODEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
rrpriv->tx_skbuff[i] = NULL;
}
}
}
static inline void rr_raz_rx(struct rr_private *rrpriv,
struct net_device *dev)
{
int i;
for (i = 0; i < RX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->rx_skbuff[i];
if (skb) {
struct rx_desc *desc = &(rrpriv->rx_ring[i]);
pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
rrpriv->rx_skbuff[i] = NULL;
}
}
}
static void rr_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
......@@ -1254,49 +1297,6 @@ static int rr_open(struct net_device *dev)
}
static inline void rr_raz_tx(struct rr_private *rrpriv,
struct net_device *dev)
{
int i;
for (i = 0; i < TX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->tx_skbuff[i];
if (skb) {
struct tx_desc *desc = &(rrpriv->tx_ring[i]);
pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
skb->len, PCI_DMA_TODEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
rrpriv->tx_skbuff[i] = NULL;
}
}
}
static inline void rr_raz_rx(struct rr_private *rrpriv,
struct net_device *dev)
{
int i;
for (i = 0; i < RX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->rx_skbuff[i];
if (skb) {
struct rx_desc *desc = &(rrpriv->rx_ring[i]);
pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
rrpriv->rx_skbuff[i] = NULL;
}
}
}
static void rr_dump(struct net_device *dev)
{
struct rr_private *rrpriv;
......
......@@ -110,10 +110,7 @@
#include <linux/module.h>
#include <linux/init.h>
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#endif
#include "h/skdrv1st.h"
#include "h/skdrv2nd.h"
......@@ -5113,9 +5110,12 @@ static void __devexit skge_remove_one(struct pci_dev *pdev)
if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 2)
have_second_mac = 1;
remove_proc_entry(dev->name, pSkRootDir);
unregister_netdev(dev);
if (have_second_mac)
if (have_second_mac) {
remove_proc_entry(pAC->dev[1]->name, pSkRootDir);
unregister_netdev(pAC->dev[1]);
}
SkGeYellowLED(pAC, pAC->IoBase, 0);
......@@ -5182,9 +5182,9 @@ static int __init skge_init(void)
{
int error;
#ifdef CONFIG_PROC_FS
memcpy(&SK_Root_Dir_entry, BOOT_STRING, sizeof(SK_Root_Dir_entry) - 1);
#ifdef CONFIG_PROC_FS
pSkRootDir = proc_mkdir(SK_Root_Dir_entry, proc_net);
if (!pSkRootDir) {
printk(KERN_WARNING "Unable to create /proc/net/%s",
......
......@@ -1191,133 +1191,6 @@ static void smc_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
/*--------------------------------------------------------------------
.
. This is the main routine of the driver, to handle the device when
. it needs some attention.
.
. So:
. first, save state of the chipset
. branch off into routines to handle each case, and acknowledge
. each to the interrupt register
. and finally restore state.
.
---------------------------------------------------------------------*/
static irqreturn_t smc_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
struct net_device *dev = dev_id;
int ioaddr = dev->base_addr;
struct smc_local *lp = netdev_priv(dev);
byte status;
word card_stats;
byte mask;
int timeout;
/* state registers */
word saved_bank;
word saved_pointer;
int handled = 0;
PRINTK3((CARDNAME": SMC interrupt started \n"));
saved_bank = inw( ioaddr + BANK_SELECT );
SMC_SELECT_BANK(2);
saved_pointer = inw( ioaddr + POINTER );
mask = inb( ioaddr + INT_MASK );
/* clear all interrupts */
outb( 0, ioaddr + INT_MASK );
/* set a timeout value, so I don't stay here forever */
timeout = 4;
PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
do {
/* read the status flag, and mask it */
status = inb( ioaddr + INTERRUPT ) & mask;
if (!status )
break;
handled = 1;
PRINTK3((KERN_WARNING CARDNAME
": Handling interrupt status %x \n", status ));
if (status & IM_RCV_INT) {
/* Got a packet(s). */
PRINTK2((KERN_WARNING CARDNAME
": Receive Interrupt\n"));
smc_rcv(dev);
} else if (status & IM_TX_INT ) {
PRINTK2((KERN_WARNING CARDNAME
": TX ERROR handled\n"));
smc_tx(dev);
outb(IM_TX_INT, ioaddr + INTERRUPT );
} else if (status & IM_TX_EMPTY_INT ) {
/* update stats */
SMC_SELECT_BANK( 0 );
card_stats = inw( ioaddr + COUNTER );
/* single collisions */
lp->stats.collisions += card_stats & 0xF;
card_stats >>= 4;
/* multiple collisions */
lp->stats.collisions += card_stats & 0xF;
/* these are for when linux supports these statistics */
SMC_SELECT_BANK( 2 );
PRINTK2((KERN_WARNING CARDNAME
": TX_BUFFER_EMPTY handled\n"));
outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT );
mask &= ~IM_TX_EMPTY_INT;
lp->stats.tx_packets += lp->packets_waiting;
lp->packets_waiting = 0;
} else if (status & IM_ALLOC_INT ) {
PRINTK2((KERN_DEBUG CARDNAME
": Allocation interrupt \n"));
/* clear this interrupt so it doesn't happen again */
mask &= ~IM_ALLOC_INT;
smc_hardware_send_packet( dev );
/* enable xmit interrupts based on this */
mask |= ( IM_TX_EMPTY_INT | IM_TX_INT );
/* and let the card send more packets to me */
netif_wake_queue(dev);
PRINTK2((CARDNAME": Handoff done successfully.\n"));
} else if (status & IM_RX_OVRN_INT ) {
lp->stats.rx_errors++;
lp->stats.rx_fifo_errors++;
outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
} else if (status & IM_EPH_INT ) {
PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
} else if (status & IM_ERCV_INT ) {
PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
outb( IM_ERCV_INT, ioaddr + INTERRUPT );
}
} while ( timeout -- );
/* restore state register */
SMC_SELECT_BANK( 2 );
outb( mask, ioaddr + INT_MASK );
PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
outw( saved_pointer, ioaddr + POINTER );
SMC_SELECT_BANK( saved_bank );
PRINTK3((CARDNAME ": Interrupt done\n"));
return IRQ_RETVAL(handled);
}
/*-------------------------------------------------------------
.
. smc_rcv - receive a packet from the card
......@@ -1509,6 +1382,134 @@ static void smc_tx( struct net_device * dev )
return;
}
/*--------------------------------------------------------------------
.
. This is the main routine of the driver, to handle the device when
. it needs some attention.
.
. So:
. first, save state of the chipset
. branch off into routines to handle each case, and acknowledge
. each to the interrupt register
. and finally restore state.
.
---------------------------------------------------------------------*/
static irqreturn_t smc_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
struct net_device *dev = dev_id;
int ioaddr = dev->base_addr;
struct smc_local *lp = netdev_priv(dev);
byte status;
word card_stats;
byte mask;
int timeout;
/* state registers */
word saved_bank;
word saved_pointer;
int handled = 0;
PRINTK3((CARDNAME": SMC interrupt started \n"));
saved_bank = inw( ioaddr + BANK_SELECT );
SMC_SELECT_BANK(2);
saved_pointer = inw( ioaddr + POINTER );
mask = inb( ioaddr + INT_MASK );
/* clear all interrupts */
outb( 0, ioaddr + INT_MASK );
/* set a timeout value, so I don't stay here forever */
timeout = 4;
PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
do {
/* read the status flag, and mask it */
status = inb( ioaddr + INTERRUPT ) & mask;
if (!status )
break;
handled = 1;
PRINTK3((KERN_WARNING CARDNAME
": Handling interrupt status %x \n", status ));
if (status & IM_RCV_INT) {
/* Got a packet(s). */
PRINTK2((KERN_WARNING CARDNAME
": Receive Interrupt\n"));
smc_rcv(dev);
} else if (status & IM_TX_INT ) {
PRINTK2((KERN_WARNING CARDNAME
": TX ERROR handled\n"));
smc_tx(dev);
outb(IM_TX_INT, ioaddr + INTERRUPT );
} else if (status & IM_TX_EMPTY_INT ) {
/* update stats */
SMC_SELECT_BANK( 0 );
card_stats = inw( ioaddr + COUNTER );
/* single collisions */
lp->stats.collisions += card_stats & 0xF;
card_stats >>= 4;
/* multiple collisions */
lp->stats.collisions += card_stats & 0xF;
/* these are for when linux supports these statistics */
SMC_SELECT_BANK( 2 );
PRINTK2((KERN_WARNING CARDNAME
": TX_BUFFER_EMPTY handled\n"));
outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT );
mask &= ~IM_TX_EMPTY_INT;
lp->stats.tx_packets += lp->packets_waiting;
lp->packets_waiting = 0;
} else if (status & IM_ALLOC_INT ) {
PRINTK2((KERN_DEBUG CARDNAME
": Allocation interrupt \n"));
/* clear this interrupt so it doesn't happen again */
mask &= ~IM_ALLOC_INT;
smc_hardware_send_packet( dev );
/* enable xmit interrupts based on this */
mask |= ( IM_TX_EMPTY_INT | IM_TX_INT );
/* and let the card send more packets to me */
netif_wake_queue(dev);
PRINTK2((CARDNAME": Handoff done successfully.\n"));
} else if (status & IM_RX_OVRN_INT ) {
lp->stats.rx_errors++;
lp->stats.rx_fifo_errors++;
outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
} else if (status & IM_EPH_INT ) {
PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
} else if (status & IM_ERCV_INT ) {
PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
outb( IM_ERCV_INT, ioaddr + INTERRUPT );
}
} while ( timeout -- );
/* restore state register */
SMC_SELECT_BANK( 2 );
outb( mask, ioaddr + INT_MASK );
PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
outw( saved_pointer, ioaddr + POINTER );
SMC_SELECT_BANK( saved_bank );
PRINTK3((CARDNAME ": Interrupt done\n"));
return IRQ_RETVAL(handled);
}
/*----------------------------------------------------
. smc_close
.
......
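smc_interrupt() above shows the classic save/dispatch/restore ISR shape, with a bounded loop so the handler cannot spin forever on a chattering chip. A skeleton of that shape, where save_chip_state(), read_status() and the handle_*() helpers are hypothetical stand-ins for the SMC bank-register I/O:

/* Skeleton of a bounded, state-preserving ISR; all helpers below are
 * hypothetical placeholders, not the smc9194 functions. */
#include <linux/interrupt.h>

#define MY_RX_BIT 0x01
#define MY_TX_BIT 0x02

extern void save_chip_state(void *dev_id);
extern void restore_chip_state(void *dev_id);
extern u8 read_status(void *dev_id);
extern void handle_rx(void *dev_id);
extern void ack_and_handle_tx(void *dev_id);

static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	int handled = 0;
	int timeout = 4;	/* never stay in the handler forever */
	u8 status;

	save_chip_state(dev_id);	/* e.g. bank + pointer registers */
	do {
		status = read_status(dev_id);
		if (!status)
			break;
		handled = 1;
		if (status & MY_RX_BIT)
			handle_rx(dev_id);
		else if (status & MY_TX_BIT)
			ack_and_handle_tx(dev_id);
	} while (timeout--);
	restore_chip_state(dev_id);

	return IRQ_RETVAL(handled);
}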
......@@ -314,13 +314,13 @@ static u16 phy_read_1bit(unsigned long);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
......@@ -884,6 +884,20 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
}
/*
* Calculate the CRC value of the Rx packet
* flag = 1 : return the reverse CRC (for the received packet CRC)
* 0 : return the normal CRC (for Hash Table index)
*/
static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
{
u32 crc = crc32(~0, Data, Len);
if (flag) crc = ~crc;
return crc;
}
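With flag = 0 the helper returns the plain CRC-32, which drivers of this family typically mask down to index a multicast hash table. A hedged usage sketch relying on the cal_CRC() above; the 6-bit hash width is an assumption for illustration, not taken from the dmfe source:

/* Hypothetical use of cal_CRC() to pick a multicast hash bucket. */
static void set_hash_bit(u32 *hash_table, const unsigned char *mc_addr)
{
	u32 index = cal_CRC((unsigned char *)mc_addr, 6, 0) & 0x3f;

	hash_table[index / 32] |= 1u << (index % 32);
}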
/*
* Receive the incoming packet and pass it to the upper layer
*/
......@@ -1773,20 +1787,6 @@ static u16 phy_read_1bit(unsigned long ioaddr)
}
/*
* Calculate the CRC value of the Rx packet
* flag = 1 : return the reverse CRC (for the received packet CRC)
* 0 : return the normal CRC (for Hash Table index)
*/
static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
{
u32 crc = crc32(~0, Data, Len);
if (flag) crc = ~crc;
return crc;
}
/*
* Parse the SROM and media mode
*/
......
......@@ -346,7 +346,7 @@ enum rhine_revs {
VT6105L = 0x8A,
VT6107 = 0x8C,
VTunknown2 = 0x8E,
VT6105M = 0x90,
VT6105M = 0x90, /* Management adapter */
};
enum rhine_quirks {
......@@ -485,6 +485,7 @@ struct rhine_private {
dma_addr_t tx_bufs_dma;
struct pci_dev *pdev;
long pioaddr;
struct net_device_stats stats;
spinlock_t lock;
......@@ -593,7 +594,7 @@ static void rhine_power_init(struct net_device *dev)
default:
reason = "Unknown";
}
printk("%s: Woke system up. Reason: %s.\n",
printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
DRV_NAME, reason);
}
}
......@@ -703,7 +704,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
long memaddr;
long ioaddr;
int io_size, phy_id;
const char *name, *mname;
const char *name;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
......@@ -718,41 +719,24 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
phy_id = 0;
quirks = 0;
name = "Rhine";
mname = "unknown";
if (pci_rev < VTunknown0) {
quirks = rqRhineI;
io_size = 128;
mname = "VT86C100A";
}
else if (pci_rev >= VT6102) {
quirks = rqWOL | rqForceReset;
if (pci_rev < VT6105) {
name = "Rhine II";
quirks |= rqStatusWBRace; /* Rhine-II exclusive */
if (pci_rev < VT8231)
mname = "VT6102";
else if (pci_rev < VT8233)
mname = "VT8231";
else if (pci_rev < VT8235)
mname = "VT8233";
else if (pci_rev < VT8237)
mname = "VT8235";
else if (pci_rev < VTunknown1)
mname = "VT8237";
}
else {
name = "Rhine III";
phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
if (pci_rev >= VT6105_B0)
quirks |= rq6patterns;
if (pci_rev < VT6105L)
mname = "VT6105";
else if (pci_rev < VT6107)
mname = "VT6105L";
else if (pci_rev < VT6105M)
mname = "VT6107";
else if (pci_rev >= VT6105M)
mname = "Management Adapter VT6105M";
if (pci_rev < VT6105M)
name = "Rhine III";
else
name = "Rhine III (Management Adapter)";
}
}
......@@ -790,6 +774,11 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
rp = netdev_priv(dev);
rp->quirks = quirks;
rp->pioaddr = pioaddr;
rp->pdev = pdev;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out_free_netdev;
......@@ -823,8 +812,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
#endif /* USE_MMIO */
dev->base_addr = ioaddr;
rp = netdev_priv(dev);
rp->quirks = quirks;
/* Get chip registers into a sane state */
rhine_power_init(dev);
......@@ -846,7 +833,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
rp->pdev = pdev;
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
......@@ -874,8 +860,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rc)
goto err_out_unmap;
printk(KERN_INFO "%s: VIA %s (%s) at 0x%lx, ",
dev->name, name, mname,
printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
dev->name, name,
#ifdef USE_MMIO
memaddr
#else
......@@ -890,7 +876,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
{
u16 mii_cmd;
int mii_status = mdio_read(dev, phy_id, 1);
mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
if (mii_status != 0xffff && mii_status != 0x0000) {
rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
printk(KERN_INFO "%s: MII PHY found at address "
......@@ -1172,7 +1161,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum)
rhine_disable_linkmon(ioaddr, rp->quirks);
writeb(0, ioaddr + MIICmd);
/* rhine_disable_linkmon already cleared MIICmd */
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writeb(0x40, ioaddr + MIICmd); /* Trigger read */
......@@ -1190,7 +1179,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value
rhine_disable_linkmon(ioaddr, rp->quirks);
writeb(0, ioaddr + MIICmd);
/* rhine_disable_linkmon already cleared MIICmd */
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writew(value, ioaddr + MIIData);
......@@ -1951,11 +1940,70 @@ static void rhine_shutdown (struct device *gendev)
}
#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
unsigned long flags;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
pci_save_state(pdev, pdev->saved_config_space);
spin_lock_irqsave(&rp->lock, flags);
rhine_shutdown(&pdev->dev);
spin_unlock_irqrestore(&rp->lock, flags);
return 0;
}
static int rhine_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
unsigned long flags;
int ret;
if (!netif_running(dev))
return 0;
ret = pci_set_power_state(pdev, 0);
if (debug > 1)
printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
dev->name, ret ? "failed" : "succeeded", ret);
pci_restore_state(pdev, pdev->saved_config_space);
spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
enable_mmio(rp->pioaddr, rp->quirks);
#endif
rhine_power_init(dev);
free_tbufs(dev);
free_rbufs(dev);
alloc_tbufs(dev);
alloc_rbufs(dev);
init_registers(dev);
spin_unlock_irqrestore(&rp->lock, flags);
netif_device_attach(dev);
return 0;
}
#endif /* CONFIG_PM */
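The new PM pair follows the usual ordering contract: detach the net device and save PCI config state before powering the chip down, then on resume restore power and config state, re-initialize the hardware and rings, and only then reattach. A condensed sketch, with reinit_hw_and_rings() as a hypothetical stand-in for rhine_resume()'s free/alloc/init sequence:

/* Condensed shape of the PM pair above; reinit_hw_and_rings() is a
 * hypothetical helper, not part of via-rhine. */
#include <linux/pci.h>
#include <linux/netdevice.h>

extern void reinit_hw_and_rings(struct net_device *dev);

static int my_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	netif_device_detach(dev);	/* stop the stack first */
	pci_save_state(pdev, pdev->saved_config_space);
	/* ...quiesce the chip under the driver lock... */
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	pci_set_power_state(pdev, 0);	/* back to D0 */
	pci_restore_state(pdev, pdev->saved_config_space);
	reinit_hw_and_rings(dev);	/* hypothetical helper */
	netif_device_attach(dev);	/* only after the chip is ready */
	return 0;
}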
static struct pci_driver rhine_driver = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
.probe = rhine_init_one,
.remove = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
.suspend = rhine_suspend,
.resume = rhine_resume,
#endif /* CONFIG_PM */
.driver = {
.shutdown = rhine_shutdown,
}
......
......@@ -262,6 +262,7 @@ static u32 check_connection_type(struct mac_regs * regs);
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
#ifdef CONFIG_PM
static int velocity_suspend(struct pci_dev *pdev, u32 state);
static int velocity_resume(struct pci_dev *pdev);
......@@ -270,9 +271,26 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
static struct notifier_block velocity_inetaddr_notifier = {
.notifier_call = velocity_netdev_event,
};
static int velocity_notifier_registered;
#endif /* CONFIG_PM */
static spinlock_t velocity_dev_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(velocity_dev_list);
static void velocity_register_notifier(void)
{
register_inetaddr_notifier(&velocity_inetaddr_notifier);
}
static void velocity_unregister_notifier(void)
{
unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
}
#else /* CONFIG_PM */
#define velocity_register_notifier() do {} while (0)
#define velocity_unregister_notifier() do {} while (0)
#endif /* !CONFIG_PM */
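The empty stubs are written as do {} while (0) rather than as nothing so that the macro always expands to a single well-formed statement. A minimal demonstration of why that matters:

/* A bare empty macro (or one expanding to "{}") would leave a stray ';'
 * that breaks the else branch below; "do {} while (0)" does not. */
#define my_stub() do {} while (0)

void example(int cond)
{
	if (cond)
		my_stub();	/* expands to one statement plus ';' */
	else
		my_stub();
}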
/*
* Internal board variants. At the moment we have only one
......@@ -327,6 +345,14 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct velocity_info *vptr = dev->priv;
#ifdef CONFIG_PM
unsigned long flags;
spin_lock_irqsave(&velocity_dev_list_lock, flags);
if (!list_empty(&velocity_dev_list))
list_del(&vptr->list);
spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
#endif
unregister_netdev(dev);
iounmap(vptr->mac_regs);
pci_release_regions(pdev);
......@@ -782,13 +808,16 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
/* and leave the chip powered down */
pci_set_power_state(pdev, 3);
out:
#ifdef CONFIG_PM
if (ret == 0 && !velocity_notifier_registered) {
velocity_notifier_registered = 1;
register_inetaddr_notifier(&velocity_inetaddr_notifier);
{
unsigned long flags;
spin_lock_irqsave(&velocity_dev_list_lock, flags);
list_add(&vptr->list, &velocity_dev_list);
spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
}
#endif
out:
return ret;
err_iounmap:
......@@ -843,6 +872,8 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_i
spin_lock_init(&vptr->lock);
spin_lock_init(&vptr->xmit_lock);
INIT_LIST_HEAD(&vptr->list);
}
/**
......@@ -2211,8 +2242,11 @@ static struct pci_driver velocity_driver = {
static int __init velocity_init_module(void)
{
int ret;
ret = pci_module_init(&velocity_driver);
velocity_register_notifier();
ret = pci_module_init(&velocity_driver);
if (ret < 0)
velocity_unregister_notifier();
return ret;
}
......@@ -2227,12 +2261,7 @@ static int __init velocity_init_module(void)
static void __exit velocity_cleanup_module(void)
{
#ifdef CONFIG_PM
if (velocity_notifier_registered) {
unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
velocity_notifier_registered = 0;
}
#endif
velocity_unregister_notifier();
pci_unregister_driver(&velocity_driver);
}
......@@ -3252,13 +3281,20 @@ static int velocity_resume(struct pci_dev *pdev)
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
struct net_device *dev;
struct velocity_info *vptr;
if (ifa) {
dev = ifa->ifa_dev->dev;
vptr = dev->priv;
struct net_device *dev = ifa->ifa_dev->dev;
struct velocity_info *vptr;
unsigned long flags;
spin_lock_irqsave(&velocity_dev_list_lock, flags);
list_for_each_entry(vptr, &velocity_dev_list, list) {
if (vptr->dev == dev) {
velocity_get_ip(vptr);
break;
}
}
spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
}
return NOTIFY_DONE;
}
......
......@@ -1733,8 +1733,7 @@ struct velocity_opt {
};
struct velocity_info {
struct velocity_info *next;
struct velocity_info *prev;
struct list_head list;
struct pci_dev *pdev;
struct net_device *dev;
......
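Replacing the hand-rolled next/prev pointers with an embedded struct list_head is what enables the generic list_add() and list_for_each_entry() calls used earlier in this patch. A minimal sketch of the pattern, with my_info standing in for velocity_info:

/* Minimal sketch of the embedded-list_head pattern; names are
 * illustrative, not the via-velocity definitions. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_info {
	struct list_head list;	/* embedded node, as in velocity_info */
	int id;
};

static LIST_HEAD(my_dev_list);
static spinlock_t my_list_lock = SPIN_LOCK_UNLOCKED;

static void my_add(struct my_info *info)
{
	unsigned long flags;

	INIT_LIST_HEAD(&info->list);
	spin_lock_irqsave(&my_list_lock, flags);
	list_add(&info->list, &my_dev_list);
	spin_unlock_irqrestore(&my_list_lock, flags);
}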
......@@ -186,7 +186,7 @@ static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble),
reset_timer(struct net_device *dev);
static u8 bps_to_speed_code(u32 bps);
static u8 log2(u32 n);
static u8 cycx_log2(u32 n);
static unsigned dec_to_uint(u8 *str, int len);
......@@ -263,7 +263,7 @@ int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf)
else
card->wandev.mtu = 64;
cfg.pktlen = log2(card->wandev.mtu);
cfg.pktlen = cycx_log2(card->wandev.mtu);
if (conf->station == WANOPT_DTE) {
cfg.locaddr = 3; /* DTE */
......@@ -1513,7 +1513,7 @@ static u8 bps_to_speed_code(u32 bps)
}
/* log base 2 */
static u8 log2(u32 n)
static u8 cycx_log2(u32 n)
{
u8 log = 0;
......
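The hunk cuts off after the renamed function's first line. A plausible completion that counts right-shifts (an assumption for illustration, not the verbatim cycx source) would be:

/* Hedged sketch of an integer log base 2 by shift counting; this is an
 * assumed completion, not the actual cycx_x25 body. */
static u8 my_log2(u32 n)
{
	u8 log = 0;

	if (!n)
		return 0;
	while (n != 1) {
		n >>= 1;
		++log;
	}
	return log;	/* floor(log2(n)) */
}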
......@@ -105,6 +105,7 @@ extern int fix_alignment(struct pt_regs *regs);
extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
int sig);
extern void show_regs(struct pt_regs * regs);
extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
extern int die(const char *str, struct pt_regs *regs, long err);
extern void flush_instruction_cache(void);
......