Commit cef12ee5 authored by Liu, Jinsong's avatar Liu, Jinsong Committed by Konrad Rzeszutek Wilk

xen/mce: Add mcelog support for Xen platform

When MCA error occurs, it would be handled by Xen hypervisor first,
and then the error information would be sent to initial domain for logging.

This patch gets error information from Xen hypervisor and convert
Xen format error into Linux format mcelog. This logic is basically
self-contained, not touching other kernel components.

By using tools like mcelog tool users could read specific error information,
like what they did under native Linux.

To test follow directions outlined in Documentation/acpi/apei/einj.txt
Acked-and-tested-by: default avatarBorislav Petkov <borislav.petkov@amd.com>
Signed-off-by: default avatarKe, Liping <liping.ke@intel.com>
Signed-off-by: default avatarJiang, Yunhong <yunhong.jiang@intel.com>
Signed-off-by: default avatarJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: default avatarLiu, Jinsong <jinsong.liu@intel.com>
Signed-off-by: default avatarKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 485802a6
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
#include <xen/interface/sched.h> #include <xen/interface/sched.h>
#include <xen/interface/physdev.h> #include <xen/interface/physdev.h>
#include <xen/interface/platform.h> #include <xen/interface/platform.h>
#include <xen/interface/xen-mca.h>
/* /*
* The hypercall asms have to meet several constraints: * The hypercall asms have to meet several constraints:
...@@ -301,6 +302,13 @@ HYPERVISOR_set_timer_op(u64 timeout) ...@@ -301,6 +302,13 @@ HYPERVISOR_set_timer_op(u64 timeout)
return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
} }
static inline int
HYPERVISOR_mca(struct xen_mc *mc_op)
{
mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
return _hypercall1(int, mca, mc_op);
}
static inline int static inline int
HYPERVISOR_dom0_op(struct xen_platform_op *platform_op) HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
{ {
......
...@@ -57,8 +57,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); ...@@ -57,8 +57,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
int mce_disabled __read_mostly; int mce_disabled __read_mostly;
#define MISC_MCELOG_MINOR 227
#define SPINUNIT 100 /* 100ns */ #define SPINUNIT 100 /* 100ns */
atomic_t mce_entry; atomic_t mce_entry;
...@@ -2342,7 +2340,7 @@ static __init int mcheck_init_device(void) ...@@ -2342,7 +2340,7 @@ static __init int mcheck_init_device(void)
return err; return err;
} }
device_initcall(mcheck_init_device); device_initcall_sync(mcheck_init_device);
/* /*
* Old style boot options parsing. Only for compatibility. * Old style boot options parsing. Only for compatibility.
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <xen/interface/physdev.h> #include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h> #include <xen/interface/vcpu.h>
#include <xen/interface/memory.h> #include <xen/interface/memory.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h> #include <xen/features.h>
#include <xen/page.h> #include <xen/page.h>
#include <xen/hvm.h> #include <xen/hvm.h>
...@@ -341,9 +342,7 @@ static void __init xen_init_cpuid_mask(void) ...@@ -341,9 +342,7 @@ static void __init xen_init_cpuid_mask(void)
unsigned int xsave_mask; unsigned int xsave_mask;
cpuid_leaf1_edx_mask = cpuid_leaf1_edx_mask =
~((1 << X86_FEATURE_MCE) | /* disable MCE */ ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */
(1 << X86_FEATURE_MCA) | /* disable MCA */
(1 << X86_FEATURE_MTRR) | /* disable MTRR */
(1 << X86_FEATURE_ACC)); /* thermal monitoring */ (1 << X86_FEATURE_ACC)); /* thermal monitoring */
if (!xen_initial_domain()) if (!xen_initial_domain())
......
...@@ -196,4 +196,12 @@ config XEN_ACPI_PROCESSOR ...@@ -196,4 +196,12 @@ config XEN_ACPI_PROCESSOR
called xen_acpi_processor If you do not know what to choose, select called xen_acpi_processor If you do not know what to choose, select
M here. If the CPUFREQ drivers are built in, select Y here. M here. If the CPUFREQ drivers are built in, select Y here.
config XEN_MCE_LOG
bool "Xen platform mcelog"
depends on XEN_DOM0 && X86_64 && X86_MCE
default n
help
Allow kernel fetching MCE error from Xen platform and
converting it into Linux mcelog format for mcelog tools
endmenu endmenu
...@@ -18,6 +18,7 @@ obj-$(CONFIG_XEN_PVHVM) += platform-pci.o ...@@ -18,6 +18,7 @@ obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
......
/******************************************************************************
* mcelog.c
* Driver for receiving and transferring machine check error infomation
*
* Copyright (c) 2012 Intel Corporation
* Author: Liu, Jinsong <jinsong.liu@intel.com>
* Author: Jiang, Yunhong <yunhong.jiang@intel.com>
* Author: Ke, Liping <liping.ke@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <xen/interface/xen.h>
#include <xen/events.h>
#include <xen/interface/vcpu.h>
#include <xen/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#define XEN_MCELOG "xen_mcelog: "

/* Scratch buffer that the hypervisor fills on each XEN_MC_fetch call */
static struct mc_info g_mi;
/* Per-physical-CPU info fetched once in bind_virq_for_mce(); ncpus entries */
static struct mcinfo_logical_cpu *g_physinfo;
static uint32_t ncpus;

/* Serializes the VIRQ handler against /dev/mcelog readers (g_mi, xen_mcelog) */
static DEFINE_SPINLOCK(mcelog_lock);

/* In-kernel ring of converted records exposed through /dev/mcelog */
static struct xen_mce_log xen_mcelog = {
	.signature	= XEN_MCE_LOG_SIGNATURE,
	.len		= XEN_MCE_LOG_LEN,
	.recordlen	= sizeof(struct xen_mce),
};

/* Protects the open bookkeeping below */
static DEFINE_SPINLOCK(xen_mce_chrdev_state_lock);
static int xen_mce_chrdev_open_count;	/* #times opened */
static int xen_mce_chrdev_open_exclu;	/* already open exclusive? */
static int xen_mce_chrdev_open(struct inode *inode, struct file *file)
{
spin_lock(&xen_mce_chrdev_state_lock);
if (xen_mce_chrdev_open_exclu ||
(xen_mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
spin_unlock(&xen_mce_chrdev_state_lock);
return -EBUSY;
}
if (file->f_flags & O_EXCL)
xen_mce_chrdev_open_exclu = 1;
xen_mce_chrdev_open_count++;
spin_unlock(&xen_mce_chrdev_state_lock);
return nonseekable_open(inode, file);
}
/* Drop one open reference and clear any exclusive-open claim. */
static int xen_mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&xen_mce_chrdev_state_lock);

	xen_mce_chrdev_open_count--;
	/* Unconditional: only one opener can exist while exclu is set */
	xen_mce_chrdev_open_exclu = 0;

	spin_unlock(&xen_mce_chrdev_state_lock);

	return 0;
}
/*
 * Read the entire log in one go (partial reads are rejected with
 * -EINVAL), then clear it so each record is delivered exactly once.
 * Returns the number of bytes copied, or a negative errno.
 */
static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
				   size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned num;
	int i, err;

	spin_lock(&mcelog_lock);

	/* Snapshot of how many valid records the ring currently holds */
	num = xen_mcelog.next;

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < XEN_MCE_LOG_LEN*sizeof(struct xen_mce))
		goto out;

	err = 0;
	for (i = 0; i < num; i++) {
		struct xen_mce *m = &xen_mcelog.entry[i];

		/*
		 * NOTE(review): copy_to_user() can fault while mcelog_lock
		 * (a spinlock) is held -- confirm this is acceptable here.
		 */
		err |= copy_to_user(buf, m, sizeof(*m));
		buf += sizeof(*m);
	}

	/* Consume the records: wipe them and rewind the ring */
	memset(xen_mcelog.entry, 0, num * sizeof(struct xen_mce));
	xen_mcelog.next = 0;

	/* Any failed copy above collapses into a single -EFAULT */
	if (err)
		err = -EFAULT;

out:
	spin_unlock(&mcelog_lock);

	return err ? err : buf - ubuf;
}
static long xen_mce_chrdev_ioctl(struct file *f, unsigned int cmd,
unsigned long arg)
{
int __user *p = (int __user *)arg;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
switch (cmd) {
case MCE_GET_RECORD_LEN:
return put_user(sizeof(struct xen_mce), p);
case MCE_GET_LOG_LEN:
return put_user(XEN_MCE_LOG_LEN, p);
case MCE_GETCLEAR_FLAGS: {
unsigned flags;
do {
flags = xen_mcelog.flags;
} while (cmpxchg(&xen_mcelog.flags, flags, 0) != flags);
return put_user(flags, p);
}
default:
return -ENOTTY;
}
}
static const struct file_operations xen_mce_chrdev_ops = {
.open = xen_mce_chrdev_open,
.release = xen_mce_chrdev_release,
.read = xen_mce_chrdev_read,
.unlocked_ioctl = xen_mce_chrdev_ioctl,
.llseek = no_llseek,
};
static struct miscdevice xen_mce_chrdev_device = {
MISC_MCELOG_MINOR,
"mcelog",
&xen_mce_chrdev_ops,
};
/*
* Caller should hold the mcelog_lock
*/
static void xen_mce_log(struct xen_mce *mce)
{
unsigned entry;
entry = xen_mcelog.next;
/*
* When the buffer fills up discard new entries.
* Assume that the earlier errors are the more
* interesting ones:
*/
if (entry >= XEN_MCE_LOG_LEN) {
set_bit(XEN_MCE_OVERFLOW,
(unsigned long *)&xen_mcelog.flags);
return;
}
memcpy(xen_mcelog.entry + entry, mce, sizeof(struct xen_mce));
xen_mcelog.next++;
}
/*
 * Convert one hypervisor mc_info blob into Linux-format xen_mce records
 * and log them.  Global info identifies the CPU; each BANK entry then
 * becomes one logged record.  Returns 0 on success, -ENODEV when the
 * blob lacks a global entry, a matching CPU, or any bank entry.
 */
static int convert_log(struct mc_info *mi)
{
	struct mcinfo_common *mic;
	struct mcinfo_global *mc_global;
	struct mcinfo_bank *mc_bank;
	struct xen_mce m;
	uint32_t i;

	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
	if (unlikely(!mic)) {
		pr_warning(XEN_MCELOG "Failed to find global error info\n");
		return -ENODEV;
	}

	memset(&m, 0, sizeof(struct xen_mce));

	mc_global = (struct mcinfo_global *)mic;
	m.mcgstatus = mc_global->mc_gstatus;
	m.apicid = mc_global->mc_apicid;

	/* Map the reported apicid onto the cached physical-CPU table */
	for (i = 0; i < ncpus; i++)
		if (g_physinfo[i].mc_apicid == m.apicid)
			break;
	if (unlikely(i == ncpus)) {
		pr_warning(XEN_MCELOG "Failed to match cpu with apicid %d\n",
			   m.apicid);
		return -ENODEV;
	}

	/* Fill CPU identity from the matched physical-CPU entry */
	m.socketid = g_physinfo[i].mc_chipid;
	m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
	m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
	m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;

	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
	if (unlikely(!mic)) {
		pr_warning(XEN_MCELOG "Fail to find bank error info\n");
		return -ENODEV;
	}

	/*
	 * Walk the remaining entries in order; stop at the first entry
	 * that is missing, empty, or of an unknown type (the size==0
	 * check also guards x86_mcinfo_next() from looping forever).
	 */
	do {
		if ((!mic) || (mic->size == 0) ||
		    (mic->type != MC_TYPE_GLOBAL &&
		     mic->type != MC_TYPE_BANK &&
		     mic->type != MC_TYPE_EXTENDED &&
		     mic->type != MC_TYPE_RECOVERY))
			break;

		if (mic->type == MC_TYPE_BANK) {
			mc_bank = (struct mcinfo_bank *)mic;
			m.misc = mc_bank->mc_misc;
			m.status = mc_bank->mc_status;
			m.addr = mc_bank->mc_addr;
			m.tsc = mc_bank->mc_tsc;
			m.bank = mc_bank->mc_bank;
			m.finished = 1;
			/*log this record*/
			xen_mce_log(&m);
		}
		mic = x86_mcinfo_next(mic);
	} while (1);

	return 0;
}
/*
 * Drain one hypervisor error-log queue (XEN_MC_URGENT or
 * XEN_MC_NONURGENT): repeatedly fetch, convert and ack entries until
 * the queue is empty or a fetch fails.  Returns the last hypercall or
 * conversion status (0 on a clean drain).
 *
 * Fix: error message said "urgnet" instead of "urgent".
 */
static int mc_queue_handle(uint32_t flags)
{
	struct xen_mc mc_op;
	int ret = 0;

	mc_op.cmd = XEN_MC_fetch;
	mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
	set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
	do {
		mc_op.u.mc_fetch.flags = flags;
		ret = HYPERVISOR_mca(&mc_op);
		if (ret) {
			pr_err(XEN_MCELOG "Failed to fetch %s error log\n",
			       (flags == XEN_MC_URGENT) ?
			       "urgent" : "nonurgent");
			break;
		}

		/* Queue empty or hypervisor couldn't produce data: done */
		if (mc_op.u.mc_fetch.flags & XEN_MC_NODATA ||
		    mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED)
			break;
		else {
			ret = convert_log(&g_mi);
			if (ret)
				pr_warning(XEN_MCELOG
					   "Failed to convert this error log, "
					   "continue acking it anyway\n");

			/* Ack so the hypervisor can release the entry */
			mc_op.u.mc_fetch.flags = flags | XEN_MC_ACK;
			ret = HYPERVISOR_mca(&mc_op);
			if (ret) {
				pr_err(XEN_MCELOG
				       "Failed to ack previous error log\n");
				break;
			}
		}
	} while (1);

	return ret;
}
/* virq handler for machine check error info*/
static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
{
	unsigned long irqflags;

	spin_lock_irqsave(&mcelog_lock, irqflags);

	/* urgent mc_info */
	if (mc_queue_handle(XEN_MC_URGENT))
		pr_err(XEN_MCELOG
		       "Failed to handle urgent mc_info queue, "
		       "continue handling nonurgent mc_info queue anyway.\n");

	/* nonurgent mc_info */
	if (mc_queue_handle(XEN_MC_NONURGENT))
		pr_err(XEN_MCELOG
		       "Failed to handle nonurgent mc_info queue.\n");

	spin_unlock_irqrestore(&mcelog_lock, irqflags);

	return IRQ_HANDLED;
}
/*
 * Query physical-CPU information from Xen and bind VIRQ_MCA to
 * xen_mce_interrupt().  On success g_physinfo/ncpus are populated and
 * the handler is live; returns 0, or a negative errno on failure.
 */
static int bind_virq_for_mce(void)
{
	int ret;
	struct xen_mc mc_op;

	memset(&mc_op, 0, sizeof(struct xen_mc));

	/* Fetch physical CPU Numbers */
	mc_op.cmd = XEN_MC_physcpuinfo;
	mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
	/*
	 * NOTE(review): g_physinfo is still NULL here, so this first call
	 * presumably only reports ncpus without copying data -- verify
	 * against the hypervisor's XEN_MC_physcpuinfo handling.
	 */
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err(XEN_MCELOG "Failed to get CPU numbers\n");
		return ret;
	}

	/* Fetch each CPU Physical Info for later reference*/
	ncpus = mc_op.u.mc_physcpuinfo.ncpus;
	g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
			     GFP_KERNEL);
	if (!g_physinfo)
		return -ENOMEM;
	/* Second call: now point the handle at the real buffer */
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err(XEN_MCELOG "Failed to get CPU info\n");
		kfree(g_physinfo);
		return ret;
	}

	ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
				      xen_mce_interrupt, 0, "mce", NULL);
	if (ret < 0) {
		pr_err(XEN_MCELOG "Failed to bind virq\n");
		kfree(g_physinfo);
		return ret;
	}

	return 0;
}
/*
 * Late init: on dom0 only, register /dev/mcelog and hook VIRQ_MCA.
 *
 * Fix: the original left the misc device registered when
 * bind_virq_for_mce() failed, leaving a dangling /dev/mcelog whose
 * log would never be filled; deregister it on that error path.
 */
static int __init xen_late_init_mcelog(void)
{
	int ret;

	/* Only DOM0 is responsible for MCE logging */
	if (!xen_initial_domain())
		return -ENODEV;

	/* register character device /dev/mcelog for xen mcelog */
	if (misc_register(&xen_mce_chrdev_device))
		return -ENODEV;

	ret = bind_virq_for_mce();
	if (ret) {
		misc_deregister(&xen_mce_chrdev_device);
		return ret;
	}

	return 0;
}
device_initcall(xen_late_init_mcelog);
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#define MPT_MINOR 220 #define MPT_MINOR 220
#define MPT2SAS_MINOR 221 #define MPT2SAS_MINOR 221
#define UINPUT_MINOR 223 #define UINPUT_MINOR 223
#define MISC_MCELOG_MINOR 227
#define HPET_MINOR 228 #define HPET_MINOR 228
#define FUSE_MINOR 229 #define FUSE_MINOR 229
#define KVM_MINOR 232 #define KVM_MINOR 232
......
/******************************************************************************
* arch-x86/mca.h
* Guest OS machine check interface to x86 Xen.
*
* Contributed by Advanced Micro Devices, Inc.
* Author: Christoph Egger <Christoph.Egger@amd.com>
*
* Updated by Intel Corporation
* Author: Liu, Jinsong <jinsong.liu@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__
#define __XEN_PUBLIC_ARCH_X86_MCA_H__
/* Hypercall */
#define __HYPERVISOR_mca __HYPERVISOR_arch_0
#define XEN_MCA_INTERFACE_VERSION 0x01ecc003
/* IN: Dom0 calls hypercall to retrieve nonurgent error log entry */
#define XEN_MC_NONURGENT 0x1
/* IN: Dom0 calls hypercall to retrieve urgent error log entry */
#define XEN_MC_URGENT 0x2
/* IN: Dom0 acknowledges previously-fetched error log entry */
#define XEN_MC_ACK 0x4
/* OUT: All is ok */
#define XEN_MC_OK 0x0
/* OUT: Domain could not fetch data. */
#define XEN_MC_FETCHFAILED 0x1
/* OUT: There was no machine check data to fetch. */
#define XEN_MC_NODATA 0x2
#ifndef __ASSEMBLY__
/* vIRQ injected to Dom0 */
#define VIRQ_MCA VIRQ_ARCH_0
/*
* mc_info entry types
* mca machine check info are recorded in mc_info entries.
* when fetch mca info, it can use MC_TYPE_... to distinguish
* different mca info.
*/
#define MC_TYPE_GLOBAL 0
#define MC_TYPE_BANK 1
#define MC_TYPE_EXTENDED 2
#define MC_TYPE_RECOVERY 3
struct mcinfo_common {
uint16_t type; /* structure type */
uint16_t size; /* size of this struct in bytes */
};
#define MC_FLAG_CORRECTABLE (1 << 0)
#define MC_FLAG_UNCORRECTABLE (1 << 1)
#define MC_FLAG_RECOVERABLE (1 << 2)
#define MC_FLAG_POLLED (1 << 3)
#define MC_FLAG_RESET (1 << 4)
#define MC_FLAG_CMCI (1 << 5)
#define MC_FLAG_MCE (1 << 6)
/* contains x86 global mc information */
struct mcinfo_global {
struct mcinfo_common common;
uint16_t mc_domid; /* running domain at the time in error */
uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
uint32_t mc_socketid; /* physical socket of the physical core */
uint16_t mc_coreid; /* physical impacted core */
uint16_t mc_core_threadid; /* core thread of physical core */
uint32_t mc_apicid;
uint32_t mc_flags;
uint64_t mc_gstatus; /* global status */
};
/* contains x86 bank mc information */
struct mcinfo_bank {
struct mcinfo_common common;
uint16_t mc_bank; /* bank nr */
uint16_t mc_domid; /* domain referenced by mc_addr if valid */
uint64_t mc_status; /* bank status */
uint64_t mc_addr; /* bank address */
uint64_t mc_misc;
uint64_t mc_ctrl2;
uint64_t mc_tsc;
};
struct mcinfo_msr {
uint64_t reg; /* MSR */
uint64_t value; /* MSR value */
};
/* contains mc information from other or additional mc MSRs */
struct mcinfo_extended {
struct mcinfo_common common;
uint32_t mc_msrs; /* Number of msr with valid values. */
/*
* Currently Intel extended MSR (32/64) include all gp registers
* and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be
* useful at present. So expand this array to 16/32 to leave room.
*/
struct mcinfo_msr mc_msr[sizeof(void *) * 4];
};
/* Recovery Action flags. Giving recovery result information to DOM0 */
/* Xen takes successful recovery action, the error is recovered */
#define REC_ACTION_RECOVERED (0x1 << 0)
/* No action is performed by XEN */
#define REC_ACTION_NONE (0x1 << 1)
/* It's possible DOM0 might take action ownership in some case */
#define REC_ACTION_NEED_RESET (0x1 << 2)
/*
* Different Recovery Action types, if the action is performed successfully,
* REC_ACTION_RECOVERED flag will be returned.
*/
/* Page Offline Action */
#define MC_ACTION_PAGE_OFFLINE (0x1 << 0)
/* CPU offline Action */
#define MC_ACTION_CPU_OFFLINE (0x1 << 1)
/* L3 cache disable Action */
#define MC_ACTION_CACHE_SHRINK (0x1 << 2)
/*
* Below interface used between XEN/DOM0 for passing XEN's recovery action
* information to DOM0.
*/
struct page_offline_action {
/* Params for passing the offlined page number to DOM0 */
uint64_t mfn;
uint64_t status;
};
struct cpu_offline_action {
/* Params for passing the identity of the offlined CPU to DOM0 */
uint32_t mc_socketid;
uint16_t mc_coreid;
uint16_t mc_core_threadid;
};
#define MAX_UNION_SIZE 16
struct mcinfo_recovery {
struct mcinfo_common common;
uint16_t mc_bank; /* bank nr */
uint8_t action_flags;
uint8_t action_types;
union {
struct page_offline_action page_retire;
struct cpu_offline_action cpu_offline;
uint8_t pad[MAX_UNION_SIZE];
} action_info;
};
#define MCINFO_MAXSIZE 768
struct mc_info {
/* Number of mcinfo_* entries in mi_data */
uint32_t mi_nentries;
uint32_t flags;
uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8];
};
DEFINE_GUEST_HANDLE_STRUCT(mc_info);
#define __MC_MSR_ARRAYSIZE 8
#define __MC_MSR_MCGCAP 0
#define __MC_NMSRS 1
#define MC_NCAPS 7
struct mcinfo_logical_cpu {
uint32_t mc_cpunr;
uint32_t mc_chipid;
uint16_t mc_coreid;
uint16_t mc_threadid;
uint32_t mc_apicid;
uint32_t mc_clusterid;
uint32_t mc_ncores;
uint32_t mc_ncores_active;
uint32_t mc_nthreads;
uint32_t mc_cpuid_level;
uint32_t mc_family;
uint32_t mc_vendor;
uint32_t mc_model;
uint32_t mc_step;
char mc_vendorid[16];
char mc_brandid[64];
uint32_t mc_cpu_caps[MC_NCAPS];
uint32_t mc_cache_size;
uint32_t mc_cache_alignment;
uint32_t mc_nmsrvals;
struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE];
};
DEFINE_GUEST_HANDLE_STRUCT(mcinfo_logical_cpu);
/*
* Prototype:
* uint32_t x86_mcinfo_nentries(struct mc_info *mi);
*/
#define x86_mcinfo_nentries(_mi) \
((_mi)->mi_nentries)
/*
* Prototype:
* struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi);
*/
#define x86_mcinfo_first(_mi) \
((struct mcinfo_common *)(_mi)->mi_data)
/*
* Prototype:
* struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic);
*/
#define x86_mcinfo_next(_mic) \
((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size))
/*
* Prototype:
* void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type);
*/
/*
 * Find the first mc_info entry of the given type.
 * On return *ret points at the matching entry, or is NULL when no
 * entry of that type exists.  No-op if ret or mi is NULL.
 *
 * Fix: use bool literals false/true rather than 0/1 for the flag.
 */
static inline void x86_mcinfo_lookup(struct mcinfo_common **ret,
				     struct mc_info *mi, uint16_t type)
{
	uint32_t i;
	struct mcinfo_common *mic;
	bool found = false;

	if (!ret || !mi)
		return;

	/* Walk the variable-sized entries front to back */
	mic = x86_mcinfo_first(mi);
	for (i = 0; i < x86_mcinfo_nentries(mi); i++) {
		if (mic->type == type) {
			found = true;
			break;
		}
		mic = x86_mcinfo_next(mic);
	}

	*ret = found ? mic : NULL;
}
/*
* Fetch machine check data from hypervisor.
*/
#define XEN_MC_fetch 1
struct xen_mc_fetch {
/*
* IN: XEN_MC_NONURGENT, XEN_MC_URGENT,
* XEN_MC_ACK if ack'king an earlier fetch
 * OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA
*/
uint32_t flags;
uint32_t _pad0;
/* OUT: id for ack, IN: id we are ack'ing */
uint64_t fetch_id;
/* OUT variables. */
GUEST_HANDLE(mc_info) data;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_mc_fetch);
/*
* This tells the hypervisor to notify a DomU about the machine check error
*/
#define XEN_MC_notifydomain 2
struct xen_mc_notifydomain {
/* IN variables */
uint16_t mc_domid; /* The unprivileged domain to notify */
uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify */
/* IN/OUT variables */
uint32_t flags;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_mc_notifydomain);
#define XEN_MC_physcpuinfo 3
struct xen_mc_physcpuinfo {
/* IN/OUT */
uint32_t ncpus;
uint32_t _pad0;
/* OUT */
GUEST_HANDLE(mcinfo_logical_cpu) info;
};
#define XEN_MC_msrinject 4
#define MC_MSRINJ_MAXMSRS 8
struct xen_mc_msrinject {
/* IN */
uint32_t mcinj_cpunr; /* target processor id */
uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */
uint32_t mcinj_count; /* 0 .. count-1 in array are valid */
uint32_t _pad0;
struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS];
};
/* Flags for mcinj_flags above; bits 16-31 are reserved */
#define MC_MSRINJ_F_INTERPOSE 0x1
#define XEN_MC_mceinject 5
struct xen_mc_mceinject {
unsigned int mceinj_cpunr; /* target processor id */
};
struct xen_mc {
uint32_t cmd;
uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */
union {
struct xen_mc_fetch mc_fetch;
struct xen_mc_notifydomain mc_notifydomain;
struct xen_mc_physcpuinfo mc_physcpuinfo;
struct xen_mc_msrinject mc_msrinject;
struct xen_mc_mceinject mc_mceinject;
} u;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_mc);
/* Fields are zero when not available */
struct xen_mce {
__u64 status;
__u64 misc;
__u64 addr;
__u64 mcgstatus;
__u64 ip;
__u64 tsc; /* cpu time stamp counter */
__u64 time; /* wall time_t when error was detected */
__u8 cpuvendor; /* cpu vendor as encoded in system.h */
__u8 inject_flags; /* software inject flags */
__u16 pad;
__u32 cpuid; /* CPUID 1 EAX */
__u8 cs; /* code segment */
__u8 bank; /* machine check bank */
__u8 cpu; /* cpu number; obsolete; use extcpu now */
__u8 finished; /* entry is valid */
__u32 extcpu; /* linux cpu number that detected the error */
__u32 socketid; /* CPU socket ID */
__u32 apicid; /* CPU initial apic ID */
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
};
/*
* This structure contains all data related to the MCE log. Also
* carries a signature to make it easier to find from external
* debugging tools. Each entry is only valid when its finished flag
* is set.
*/
#define XEN_MCE_LOG_LEN 32
struct xen_mce_log {
char signature[12]; /* "MACHINECHECK" */
unsigned len; /* = XEN_MCE_LOG_LEN */
unsigned next;
unsigned flags;
unsigned recordlen; /* length of struct xen_mce */
struct xen_mce entry[XEN_MCE_LOG_LEN];
};
#define XEN_MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
#define XEN_MCE_LOG_SIGNATURE "MACHINECHECK"
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
#define MCE_GET_LOG_LEN _IOR('M', 2, int)
#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
#endif /* __ASSEMBLY__ */
#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment