Commit d5fc1d51 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp:
  amd64_edac: Minor formatting fix
  amd64_edac: Fix operator precedence error
  edac, mc: Improve scrub rate handling
  amd64_edac: Correct scrub rate setting
  amd64_edac: Fix DCT base address selector
  amd64_edac: Remove polling mechanism
  x86, mce: Notify about corrected events too
  amd64_edac: Remove unneeded defines
  edac: Remove EDAC_DEBUG_VERBOSE
  amd64_edac: Sanitize syndrome extraction
parents 694f690d c4799c75
@@ -600,6 +600,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		 */
 		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
 			mce_log(&m);
+			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m);
 			add_taint(TAINT_MACHINE_CHECK);
 		}
...
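For context, a minimal sketch of how a decoder module can subscribe to the chain that machine_check_poll() now notifies for corrected errors as well. The callback and notifier_block names are hypothetical; only the standard atomic notifier API and the struct mce handed in by the caller above are assumed.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/mce.h>

/* Hypothetical decoder callback: called for every logged MCE, including the
 * corrected ones now pushed through the chain by machine_check_poll(). */
static int example_decode_mce(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = data;

	pr_info("MCE on CPU %d, bank %d, status 0x%016llx\n",
		m->extcpu, m->bank, (unsigned long long)m->status);

	return NOTIFY_STOP;
}

static struct notifier_block example_mce_dec_nb = {
	.notifier_call	= example_decode_mce,
};

/* Registration, typically done at module init:
 * atomic_notifier_chain_register(&x86_mce_decoder_chain, &example_mce_dec_nb);
 */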
@@ -39,14 +39,6 @@ config EDAC_DEBUG
 	  there're four debug levels (x=0,1,2,3 from low to high).
 	  Usually you should select 'N'.
 
-config EDAC_DEBUG_VERBOSE
-	bool "More verbose debugging"
-	depends on EDAC_DEBUG
-	help
-	  This option makes debugging information more verbose.
-	  Source file name and line number where debugging message
-	  printed will be added to debugging message.
-
 config EDAC_DECODE_MCE
 	tristate "Decode MCEs in human-readable form (only on AMD for now)"
 	depends on CPU_SUP_AMD && X86_MCE
...
This diff is collapsed.
@@ -244,44 +244,17 @@
 #define F10_DCTL_SEL_LOW		0x110
 
-#define dct_sel_baseaddr(pvt) \
-	((pvt->dram_ctl_select_low) & 0xFFFFF800)
-
-#define dct_sel_interleave_addr(pvt) \
-	(((pvt->dram_ctl_select_low) >> 6) & 0x3)
-
-enum {
-	F10_DCTL_SEL_LOW_DctSelHiRngEn	= BIT(0),
-	F10_DCTL_SEL_LOW_DctSelIntLvEn	= BIT(2),
-	F10_DCTL_SEL_LOW_DctGangEn	= BIT(4),
-	F10_DCTL_SEL_LOW_DctDatIntLv	= BIT(5),
-	F10_DCTL_SEL_LOW_DramEnable	= BIT(8),
-	F10_DCTL_SEL_LOW_MemCleared	= BIT(10),
-};
-
-#define dct_high_range_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
-
-#define dct_interleave_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
-
-#define dct_ganging_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
-
-#define dct_data_intlv_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
-
-#define dct_dram_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
-
-#define dct_memory_cleared(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
+#define dct_sel_baseaddr(pvt)		((pvt->dram_ctl_select_low) & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt)	(((pvt->dram_ctl_select_low) >> 6) & 0x3)
+#define dct_high_range_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(0))
+#define dct_interleave_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(2))
+#define dct_ganging_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(4))
+#define dct_data_intlv_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(5))
+#define dct_dram_enabled(pvt)		(pvt->dram_ctl_select_low & BIT(8))
+#define dct_memory_cleared(pvt)		(pvt->dram_ctl_select_low & BIT(10))
 
 #define F10_DCTL_SEL_HIGH	0x114
 
 /*
  * Function 3 - Misc Control
  */
@@ -382,6 +355,8 @@ enum {
 #define K8_NBCAP_SECDED			BIT(3)
 #define K8_NBCAP_DCT_DUAL		BIT(0)
 
+#define EXT_NB_MCA_CFG			0x180
+
 /* MSRs */
 #define K8_MSR_MCGCTL_NBE		BIT(4)
 
@@ -471,6 +446,9 @@ struct amd64_pvt {
 	u32 dram_ctl_select_high;	/* DRAM Controller Select High Reg */
 	u32 online_spare;		/* On-Line spare Reg */
 
+	/* x4 or x8 syndromes in use */
+	u8 syn_type;
+
 	/* temp storage for when input is received from sysfs */
 	struct err_regs ctl_error_info;
...
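For illustration, a minimal sketch of how the simplified one-line accessors above are typically used. The function itself is hypothetical; it assumes pvt is a struct amd64_pvt whose dram_ctl_select_low field has already been read from the F10h DCT select register, with the usual F10h meaning of the bits.

/* Hypothetical helper: report a few DCT-select facts via the accessors
 * defined above. Assumes pvt->dram_ctl_select_low is already populated. */
static void example_dump_dct_select(struct amd64_pvt *pvt)
{
	if (!dct_dram_enabled(pvt))
		return;

	if (dct_ganging_enabled(pvt))
		pr_info("DCTs ganged (128-bit mode)\n");
	else if (dct_high_range_enabled(pvt))
		pr_info("DCT high range selected, base: 0x%x\n",
			dct_sel_baseaddr(pvt));

	if (dct_interleave_enabled(pvt))
		pr_info("channel interleave address select: 0x%x\n",
			dct_sel_interleave_addr(pvt));
}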
@@ -958,7 +958,7 @@ static void e752x_check(struct mem_ctl_info *mci)
 }
 
 /* Program byte/sec bandwidth scrub rate to hardware */
-static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *new_bw)
+static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
 {
 	const struct scrubrate *scrubrates;
 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -975,7 +975,7 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *new_bw)
 	 * desired rate and program the cooresponding register value.
 	 */
 	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
-		if (scrubrates[i].bandwidth >= *new_bw)
+		if (scrubrates[i].bandwidth >= new_bw)
 			break;
 
 	if (scrubrates[i].bandwidth == SDRATE_EOT)
...
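A brief sketch of the lookup idiom used in the loop above: walk a table of supported scrub bandwidths, terminated by an end-of-table marker, and stop at the first entry large enough to satisfy the request. The table values and the struct/marker definitions below are illustrative stand-ins, not the real e752x entries.

#include <linux/types.h>

/* Illustrative end-of-table marker and entry layout only. */
#define EXAMPLE_SDRATE_EOT	0xffffffff

struct example_scrubrate {
	u32 bandwidth;	/* scrub bandwidth in bytes/sec */
	u16 scrubval;	/* register encoding that selects it */
};

static const struct example_scrubrate example_scrubrates[] = {
	{ 156250,             0x01 },
	{ 312500,             0x02 },
	{ 625000,             0x03 },
	{ EXAMPLE_SDRATE_EOT, 0x00 },	/* end of table */
};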
@@ -49,21 +49,15 @@
 #define edac_printk(level, prefix, fmt, arg...) \
 	printk(level "EDAC " prefix ": " fmt, ##arg)
 
-#define edac_printk_verbose(level, prefix, fmt, arg...) \
-	printk(level "EDAC " prefix ": " "in %s, line at %d: " fmt, \
-	       __FILE__, __LINE__, ##arg)
-
 #define edac_mc_printk(mci, level, fmt, arg...) \
 	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
 
 #define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
 	printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
 
-/* edac_device printk */
 #define edac_device_printk(ctl, level, fmt, arg...) \
 	printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
 
-/* edac_pci printk */
 #define edac_pci_printk(ctl, level, fmt, arg...) \
 	printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
 
@@ -76,21 +70,12 @@
 extern int edac_debug_level;
 extern const char *edac_mem_types[];
 
-#ifndef CONFIG_EDAC_DEBUG_VERBOSE
 #define edac_debug_printk(level, fmt, arg...)                           \
 	do {                                                            \
 		if (level <= edac_debug_level)                          \
 			edac_printk(KERN_DEBUG, EDAC_DEBUG,             \
 				    "%s: " fmt, __func__, ##arg);       \
 	} while (0)
-#else  /* CONFIG_EDAC_DEBUG_VERBOSE */
-#define edac_debug_printk(level, fmt, arg...)                            \
-	do {                                                             \
-		if (level <= edac_debug_level)                           \
-			edac_printk_verbose(KERN_DEBUG, EDAC_DEBUG, fmt, \
-					    ##arg);                      \
-	} while (0)
-#endif
 
 #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
 #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
 
@@ -393,7 +378,7 @@ struct mem_ctl_info {
 	   internal representation and configures whatever else needs
 	   to be configured.
 	 */
-	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);
 
 	/* Get the current sdram memory scrub rate from the internal
 	   representation and converts it to the closest matching
...
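A minimal sketch of what the callback change above implies for a driver: set_sdram_scrub_rate() now receives the requested bandwidth by value and reports success or failure through its return code. The private struct, register name and helper below are hypothetical; only the signature and the 0/-errno convention come from the interface change.

/* Hypothetical driver implementation following the new by-value interface:
 * return 0 on success, a negative errno if the rate cannot be programmed. */
static int example_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct example_pvt *pvt = mci->pvt_info;	/* hypothetical private data */
	u32 scrubval;

	if (!bw)
		return -EINVAL;		/* nothing sensible to program */

	scrubval = example_bw_to_regval(bw);	/* hypothetical translation helper */
	pci_write_config_dword(pvt->pdev, EXAMPLE_SCRUB_REG, scrubval);

	return 0;
}

/* Wired up when the mem_ctl_info is set up:
 * mci->set_sdram_scrub_rate = example_set_sdram_scrub_rate;
 */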
@@ -124,19 +124,6 @@ static const char *edac_caps[] = {
 	[EDAC_S16ECD16ED] = "S16ECD16ED"
 };
 
-static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
-{
-	int *value = (int *)ptr;
-
-	if (isdigit(*buffer))
-		*value = simple_strtoul(buffer, NULL, 0);
-
-	return count;
-}
-
 /* EDAC sysfs CSROW data structures and methods
  */
@@ -452,51 +439,52 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
 					  const char *data, size_t count)
 {
-	u32 bandwidth = -1;
-
-	if (mci->set_sdram_scrub_rate) {
-
-		memctrl_int_store(&bandwidth, data, count);
-
-		if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) {
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate set successfully, applied: %d\n",
-				bandwidth);
-		} else {
-			/* FIXME: error codes maybe? */
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate set FAILED, could not apply: %d\n",
-				bandwidth);
-		}
-	} else {
-		/* FIXME: produce "not implemented" ERROR for user-side. */
-		edac_printk(KERN_WARNING, EDAC_MC,
-			"Memory scrubbing 'set'control is not implemented!\n");
-	}
-	return count;
+	unsigned long bandwidth = 0;
+	int err;
+
+	if (!mci->set_sdram_scrub_rate) {
+		edac_printk(KERN_WARNING, EDAC_MC,
+			    "Memory scrub rate setting not implemented!\n");
+		return -EINVAL;
+	}
+
+	if (strict_strtoul(data, 10, &bandwidth) < 0)
+		return -EINVAL;
+
+	err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth);
+	if (err) {
+		edac_printk(KERN_DEBUG, EDAC_MC,
+			    "Failed setting scrub rate to %lu\n", bandwidth);
+		return -EINVAL;
+	}
+	else {
+		edac_printk(KERN_DEBUG, EDAC_MC,
+			    "Scrub rate set to: %lu\n", bandwidth);
+		return count;
+	}
 }
 
 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
 {
-	u32 bandwidth = -1;
-
-	if (mci->get_sdram_scrub_rate) {
-		if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) {
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate successfully, fetched: %d\n",
-				bandwidth);
-		} else {
-			/* FIXME: error codes maybe? */
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate fetch FAILED, got: %d\n",
-				bandwidth);
-		}
-	} else {
-		/* FIXME: produce "not implemented" ERROR for user-side. */
-		edac_printk(KERN_WARNING, EDAC_MC,
-			"Memory scrubbing 'get' control is not implemented\n");
-	}
-
-	return sprintf(data, "%d\n", bandwidth);
+	u32 bandwidth = 0;
+	int err;
+
+	if (!mci->get_sdram_scrub_rate) {
+		edac_printk(KERN_WARNING, EDAC_MC,
+			    "Memory scrub rate reading not implemented\n");
+		return -EINVAL;
+	}
+
+	err = mci->get_sdram_scrub_rate(mci, &bandwidth);
+	if (err) {
+		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
+		return err;
+	}
+	else {
+		edac_printk(KERN_DEBUG, EDAC_MC,
+			    "Read scrub rate: %d\n", bandwidth);
+		return sprintf(data, "%d\n", bandwidth);
+	}
 }
 
 /* default attribute files for the MCI object */
...
@@ -133,7 +133,7 @@ static void amd_decode_dc_mce(u64 mc0_status)
 	u32 ec  = mc0_status & 0xffff;
 	u32 xec = (mc0_status >> 16) & 0xf;
 
-	pr_emerg(" Data Cache Error");
+	pr_emerg("Data Cache Error");
 
 	if (xec == 1 && TLB_ERROR(ec))
 		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
@@ -176,7 +176,7 @@ static void amd_decode_ic_mce(u64 mc1_status)
 	u32 ec  = mc1_status & 0xffff;
 	u32 xec = (mc1_status >> 16) & 0xf;
 
-	pr_emerg(" Instruction Cache Error");
+	pr_emerg("Instruction Cache Error");
 
 	if (xec == 1 && TLB_ERROR(ec))
 		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
@@ -233,7 +233,7 @@ static void amd_decode_bu_mce(u64 mc2_status)
 	u32 ec  = mc2_status & 0xffff;
 	u32 xec = (mc2_status >> 16) & 0xf;
 
-	pr_emerg(" Bus Unit Error");
+	pr_emerg("Bus Unit Error");
 
 	if (xec == 0x1)
 		pr_cont(" in the write data buffers.\n");
@@ -275,7 +275,7 @@ static void amd_decode_ls_mce(u64 mc3_status)
 	u32 ec  = mc3_status & 0xffff;
 	u32 xec = (mc3_status >> 16) & 0xf;
 
-	pr_emerg(" Load Store Error");
+	pr_emerg("Load Store Error");
 
 	if (xec == 0x0) {
 		u8 rrrr = (ec >> 4) & 0xf;
@@ -304,7 +304,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 	if (TLB_ERROR(ec) && !report_gart_errors)
 		return;
 
-	pr_emerg(" Northbridge Error, node %d", node_id);
+	pr_emerg("Northbridge Error, node %d", node_id);
 
 	/*
 	 * F10h, revD can disable ErrCpu[3:0] so check that first and also the
@@ -342,13 +342,13 @@ static void amd_decode_fr_mce(u64 mc5_status)
 static inline void amd_decode_err_code(unsigned int ec)
 {
 	if (TLB_ERROR(ec)) {
-		pr_emerg(" Transaction: %s, Cache Level %s\n",
+		pr_emerg("Transaction: %s, Cache Level %s\n",
 			 TT_MSG(ec), LL_MSG(ec));
 	} else if (MEM_ERROR(ec)) {
-		pr_emerg(" Transaction: %s, Type: %s, Cache Level: %s",
+		pr_emerg("Transaction: %s, Type: %s, Cache Level: %s",
 			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
 	} else if (BUS_ERROR(ec)) {
-		pr_emerg(" Transaction type: %s(%s), %s, Cache Level: %s, "
+		pr_emerg("Transaction type: %s(%s), %s, Cache Level: %s, "
 			 "Participating Processor: %s\n",
 			 RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
 			 PP_MSG(ec));
...
@@ -589,14 +589,13 @@ static void i5100_refresh_scrubbing(struct work_struct *work)
 /*
  * The bandwidth is based on experimentation, feel free to refine it.
  */
-static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
-				u32 *bandwidth)
+static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
 {
 	struct i5100_priv *priv = mci->pvt_info;
 	u32 dw;
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
-	if (*bandwidth) {
+	if (bandwidth) {
 		priv->scrub_enable = 1;
 		dw |= I5100_MC_SCRBEN_MASK;
 		schedule_delayed_work(&(priv->i5100_scrubbing),
@@ -610,7 +609,7 @@ static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
 
-	*bandwidth = 5900000 * i5100_mc_scrben(dw);
+	bandwidth = 5900000 * i5100_mc_scrben(dw);
 
 	return 0;
 }
...