Commit fec98069 authored by Linus Torvalds

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add support for the "Dhyana" x86 CPUs by Hygon: these are licensed
     based on the AMD Zen architecture, and are built and sold in China,
     for domestic datacenter use. The code is pretty close to AMD
     support, mostly with a few quirks and enumeration differences. (Pu
     Wen)

   - Enable CPUID support on Cyrix 6x86/6x86L processors"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/cpupower: Add Hygon Dhyana support
  cpufreq: Add Hygon Dhyana support
  ACPI: Add Hygon Dhyana support
  x86/xen: Add Hygon Dhyana support to Xen
  x86/kvm: Add Hygon Dhyana support to KVM
  x86/mce: Add Hygon Dhyana support to the MCA infrastructure
  x86/bugs: Add Hygon Dhyana to the respective mitigation machinery
  x86/apic: Add Hygon Dhyana support
  x86/pci, x86/amd_nb: Add Hygon Dhyana support to PCI and northbridge
  x86/amd_nb: Check vendor in AMD-only functions
  x86/alternative: Init ideal_nops for Hygon Dhyana
  x86/events: Add Hygon Dhyana support to PMU infrastructure
  x86/smpboot: Do not use BSP INIT delay and MWAIT to idle on Dhyana
  x86/cpu/mtrr: Support TOP_MEM2 and get MTRR number
  x86/cpu: Get cache info and setup cache cpumap for Hygon Dhyana
  x86/cpu: Create Hygon Dhyana architecture support file
  x86/CPU: Change query logic so CPUID is enabled before testing
  x86/CPU: Use correct macros for Cyrix calls
parents 04ce7fae 995d5f64
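
The whole series keys off the new "HygonGenuine" CPUID vendor string and the X86_VENDOR_HYGON id introduced below. Not part of the merge itself, but as a rough user-space sketch (assuming a GCC/Clang toolchain that provides <cpuid.h>), the vendor string can be read from CPUID leaf 0 like this:

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	/* CPUID leaf 0: maximum leaf in EAX, vendor string in EBX, EDX, ECX. */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	memcpy(vendor, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	/* Dhyana parts report "HygonGenuine"; AMD parts report "AuthenticAMD". */
	printf("%s%s\n", vendor,
	       strcmp(vendor, "HygonGenuine") == 0 ? " (Hygon Dhyana)" : "");
	return 0;
}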
@@ -6787,6 +6787,12 @@ S:	Maintained
 F:	mm/memory-failure.c
 F:	mm/hwpoison-inject.c
 
+HYGON PROCESSOR SUPPORT
+M:	Pu Wen <puwen@hygon.cn>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	arch/x86/kernel/cpu/hygon.c
+
 Hyper-V CORE AND DRIVERS
 M:	"K. Y. Srinivasan" <kys@microsoft.com>
 M:	Haiyang Zhang <haiyangz@microsoft.com>
......
@@ -426,6 +426,20 @@ config CPU_SUP_AMD
 
 	  If unsure, say N.
 
+config CPU_SUP_HYGON
+	default y
+	bool "Support Hygon processors" if PROCESSOR_SELECT
+	select CPU_SUP_AMD
+	help
+	  This enables detection, tunings and quirks for Hygon processors
+
+	  You need this enabled if you want your kernel to run on an
+	  Hygon CPU. Disabling this option on other types of CPUs
+	  makes the kernel a tiny bit smaller. Disabling it on an Hygon
+	  CPU might render the kernel unbootable.
+
+	  If unsure, say N.
+
 config CPU_SUP_CENTAUR
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
......
@@ -669,6 +669,10 @@ static int __init amd_core_pmu_init(void)
 		 * We fallback to using default amd_get_event_constraints.
 		 */
 		break;
+	case 0x18:
+		pr_cont("Fam18h ");
+		/* Using default amd_get_event_constraints. */
+		break;
 	default:
 		pr_err("core perfctr but no constraints; unknown hardware!\n");
 		return -ENODEV;
......
@@ -515,17 +515,19 @@ static int __init amd_uncore_init(void)
 {
 	int ret = -ENODEV;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
 		return -ENODEV;
 
-	if (boot_cpu_data.x86 == 0x17) {
+	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
 		/*
-		 * For F17h, the Northbridge counters are repurposed as Data
-		 * Fabric counters. Also, L3 counters are supported too. The PMUs
-		 * are exported based on family as either L2 or L3 and NB or DF.
+		 * For F17h or F18h, the Northbridge counters are
+		 * repurposed as Data Fabric counters. Also, L3
+		 * counters are supported too. The PMUs are exported
+		 * based on family as either L2 or L3 and NB or DF.
 		 */
 		num_counters_nb = NUM_COUNTERS_NB;
 		num_counters_llc = NUM_COUNTERS_L3;
@@ -557,7 +559,9 @@ static int __init amd_uncore_init(void)
 		if (ret)
 			goto fail_nb;
 
-		pr_info("AMD NB counters detected\n");
+		pr_info("%s NB counters detected\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+				"HYGON" : "AMD");
 		ret = 0;
 	}
 
@@ -571,7 +575,9 @@ static int __init amd_uncore_init(void)
 		if (ret)
 			goto fail_llc;
 
-		pr_info("AMD LLC counters detected\n");
+		pr_info("%s LLC counters detected\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+				"HYGON" : "AMD");
 		ret = 0;
 	}
 
......
@@ -1797,6 +1797,10 @@ static int __init init_hw_perf_events(void)
 	case X86_VENDOR_AMD:
 		err = amd_pmu_init();
 		break;
+	case X86_VENDOR_HYGON:
+		err = amd_pmu_init();
+		x86_pmu.name = "HYGON";
+		break;
 	default:
 		err = -ENOTSUPP;
 	}
......
@@ -103,6 +103,9 @@ static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
 static inline bool amd_gart_present(void)
 {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return false;
+
 	/* GART present only on Fam15h, upto model 0fh */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
......
@@ -3,5 +3,6 @@
 #define _ASM_X86_CACHEINFO_H
 
 void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
 
 #endif /* _ASM_X86_CACHEINFO_H */
@@ -364,6 +364,10 @@ struct x86_emulate_ctxt {
 #define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
 #define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
 
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx 0x6f677948
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx 0x656e6975
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx 0x6e65476e
+
 #define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
 #define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
 #define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
......
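
The three X86EMUL_CPUID_VENDOR_HygonGenuine_* values above are just the ASCII string "HygonGenuine" packed little-endian into EBX, EDX and ECX (CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order). A minimal sketch, assuming only a little-endian host, that reconstructs the string from those constants:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Values copied from the hunk above. */
	unsigned int ebx = 0x6f677948, ecx = 0x656e6975, edx = 0x6e65476e;
	char vendor[13];

	memcpy(vendor, &ebx, 4);	/* "Hygo" */
	memcpy(vendor + 4, &edx, 4);	/* "nGen" */
	memcpy(vendor + 8, &ecx, 4);	/* "uine" */
	vendor[12] = '\0';

	printf("%s\n", vendor);		/* prints "HygonGenuine" */
	return 0;
}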
@@ -217,6 +217,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
 static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
 #endif
 
+static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
+
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
......
@@ -155,7 +155,8 @@ enum cpuid_regs_idx {
 #define X86_VENDOR_CENTAUR	5
 #define X86_VENDOR_TRANSMETA	7
 #define X86_VENDOR_NSC		8
-#define X86_VENDOR_NUM		9
+#define X86_VENDOR_HYGON	9
+#define X86_VENDOR_NUM		10
 
 #define X86_VENDOR_UNKNOWN	0xff
......
@@ -83,9 +83,10 @@ static inline void cpu_emergency_vmxoff(void)
  */
 static inline int cpu_has_svm(const char **msg)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
 		if (msg)
-			*msg = "not amd";
+			*msg = "not amd or hygon";
 		return 0;
 	}
......
@@ -222,6 +222,10 @@ void __init arch_init_ideal_nops(void)
 		}
 		break;
 
+	case X86_VENDOR_HYGON:
+		ideal_nops = p6_nops;
+		return;
+
 	case X86_VENDOR_AMD:
 		if (boot_cpu_data.x86 > 0xf) {
 			ideal_nops = p6_nops;
......
@@ -61,6 +61,21 @@ static const struct pci_device_id amd_nb_link_ids[] = {
 	{}
 };
 
+static const struct pci_device_id hygon_root_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
+	{}
+};
+
+const struct pci_device_id hygon_nb_misc_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+	{}
+};
+
+static const struct pci_device_id hygon_nb_link_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+	{}
+};
+
 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 	{ 0x00, 0x18, 0x20 },
 	{ 0xff, 0x00, 0x20 },
@@ -194,15 +209,24 @@ EXPORT_SYMBOL_GPL(amd_df_indirect_read);
 
 int amd_cache_northbridges(void)
 {
-	u16 i = 0;
-	struct amd_northbridge *nb;
+	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
+	const struct pci_device_id *link_ids = amd_nb_link_ids;
+	const struct pci_device_id *root_ids = amd_root_ids;
 	struct pci_dev *root, *misc, *link;
+	struct amd_northbridge *nb;
+	u16 i = 0;
 
 	if (amd_northbridges.num)
 		return 0;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		root_ids = hygon_root_ids;
+		misc_ids = hygon_nb_misc_ids;
+		link_ids = hygon_nb_link_ids;
+	}
+
 	misc = NULL;
-	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
 		i++;
 
 	if (!i)
@@ -218,11 +242,11 @@ int amd_cache_northbridges(void)
 	link = misc = root = NULL;
 	for (i = 0; i != amd_northbridges.num; i++) {
 		node_to_amd_nb(i)->root = root =
-			next_northbridge(root, amd_root_ids);
+			next_northbridge(root, root_ids);
 		node_to_amd_nb(i)->misc = misc =
-			next_northbridge(misc, amd_nb_misc_ids);
+			next_northbridge(misc, misc_ids);
 		node_to_amd_nb(i)->link = link =
-			next_northbridge(link, amd_nb_link_ids);
+			next_northbridge(link, link_ids);
 	}
 
 	if (amd_gart_present())
@@ -261,11 +285,19 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges);
  */
 bool __init early_is_amd_nb(u32 device)
 {
+	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
 	const struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		misc_ids = hygon_nb_misc_ids;
+
 	device >>= 16;
-	for (id = amd_nb_misc_ids; id->vendor; id++)
+	for (id = misc_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return true;
 	return false;
@@ -277,7 +309,8 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
 	u64 base, msr;
 	unsigned int segn_busn_bits;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return NULL;
 
 	/* assume all cpus from fam10h have mmconfig */
......
@@ -224,6 +224,11 @@ static int modern_apic(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 	    boot_cpu_data.x86 >= 0xf)
 		return 1;
+
+	/* Hygon systems use modern APIC */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		return 1;
+
 	return lapic_get_version() >= 0x14;
 }
@@ -1912,6 +1917,8 @@ static int __init detect_init_APIC(void)
 		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
+	case X86_VENDOR_HYGON:
+		break;
 	case X86_VENDOR_INTEL:
 		if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
 		    (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
......
@@ -185,6 +185,7 @@ void __init default_setup_apic_routing(void)
 			break;
 		}
 		/* If P4 and above fall through */
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		def_to_bigsmp = 1;
 	}
......
@@ -30,6 +30,7 @@ obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o intel_pconfig.o
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
+obj-$(CONFIG_CPU_SUP_HYGON)		+= hygon.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR)		+= centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
......
@@ -312,6 +312,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	}
 
 	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
 	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
 		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
 		return SPECTRE_V2_CMD_AUTO;
@@ -371,7 +372,8 @@ static void __init spectre_v2_select_mitigation(void)
 		return;
 
 retpoline_auto:
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
 	retpoline_amd:
 		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
 			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
......
@@ -602,6 +602,10 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 		else
 			amd_cpuid4(index, &eax, &ebx, &ecx);
 		amd_init_l3_cache(this_leaf, index);
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		cpuid_count(0x8000001d, index, &eax.full,
+			    &ebx.full, &ecx.full, &edx);
+		amd_init_l3_cache(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	}
@@ -625,7 +629,8 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 	union _cpuid4_leaf_eax	cache_eax;
 	int			i = -1;
 
-	if (c->x86_vendor == X86_VENDOR_AMD)
+	if (c->x86_vendor == X86_VENDOR_AMD ||
+	    c->x86_vendor == X86_VENDOR_HYGON)
 		op = 0x8000001d;
 	else
 		op = 4;
@@ -678,6 +683,22 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
 	}
 }
 
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
+{
+	/*
+	 * We may have multiple LLCs if L3 caches exist, so check if we
+	 * have an L3 cache by looking at the L3 cache CPUID leaf.
+	 */
+	if (!cpuid_edx(0x80000006))
+		return;
+
+	/*
+	 * LLC is at the core complex level.
+	 * Core complex ID is ApicId[3] for these processors.
+	 */
+	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+}
+
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
@@ -691,6 +712,11 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 	}
 }
 
+void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
+{
+	num_cache_leaves = find_num_cache_leaves(c);
+}
+
 void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
@@ -913,7 +939,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
 	int index_msb, i;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (c->x86_vendor == X86_VENDOR_AMD ||
+	    c->x86_vendor == X86_VENDOR_HYGON) {
 		if (__cache_amd_cpumap_setup(cpu, index, base))
 			return;
 	}
......
@@ -963,6 +963,7 @@ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
 
 static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
 	{ X86_VENDOR_AMD },
+	{ X86_VENDOR_HYGON },
 	{}
 };
 
@@ -1076,6 +1077,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 	c->extended_cpuid_level = 0;
 
+	if (!have_cpuid_p())
+		identify_cpu_without_cpuid(c);
+
 	/* cyrix could have cpuid enabled via c_identify()*/
 	if (have_cpuid_p()) {
 		cpu_detect(c);
@@ -1093,7 +1097,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		if (this_cpu->c_bsp_init)
 			this_cpu->c_bsp_init(c);
 	} else {
-		identify_cpu_without_cpuid(c);
 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
 	}
 
......
@@ -54,6 +54,7 @@ extern u32 get_scattered_cpuid_leaf(unsigned int level,
 				    enum cpuid_regs_idx reg);
 extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
+extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);
 
 extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
 extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
......
@@ -437,7 +437,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
 		/* enable MAPEN  */
 		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
 		/* enable cpuid  */
-		setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+		setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);
 		/* disable MAPEN */
 		setCx86(CX86_CCR3, ccr3);
 		local_irq_restore(flags);
......
This diff is collapsed.
@@ -336,7 +336,8 @@ int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
 
 void __init mcheck_vendor_init_severity(void)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		mce_severity = mce_severity_amd;
 }
......
@@ -270,7 +270,7 @@ static void print_mce(struct mce *m)
 {
 	__print_mce(m);
 
-	if (m->cpuvendor != X86_VENDOR_AMD)
+	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
@@ -508,9 +508,9 @@ static int mce_usable_address(struct mce *m)
 bool mce_is_memory_error(struct mce *m)
 {
-	if (m->cpuvendor == X86_VENDOR_AMD) {
+	if (m->cpuvendor == X86_VENDOR_AMD ||
+	    m->cpuvendor == X86_VENDOR_HYGON) {
 		return amd_mce_is_memory_error(m);
 	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
 		/*
 		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
@@ -539,6 +539,9 @@ static bool mce_is_correctable(struct mce *m)
 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 		return false;
 
+	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
+		return false;
+
 	if (m->status & MCI_STATUS_UC)
 		return false;
 
@@ -1705,7 +1708,7 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
  */
 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 {
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
@@ -1746,6 +1749,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 		mce_amd_feature_init(c);
 		break;
 		}
+
+	case X86_VENDOR_HYGON:
+		mce_hygon_feature_init(c);
+		break;
+
 	case X86_VENDOR_CENTAUR:
 		mce_centaur_feature_init(c);
 		break;
@@ -1971,12 +1979,14 @@ static void mce_disable_error_reporting(void)
 static void vendor_disable_error_reporting(void)
 {
 	/*
-	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
+	 * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs
+	 * are socket-wide.
 	 * Disabling them for just a single offlined CPU is bad, since it will
 	 * inhibit reporting for all shared resources on the socket like the
 	 * last level cache (LLC), the integrated memory controller (iMC), etc.
 	 */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
 	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		return;
 
......
@@ -831,7 +831,8 @@ int __init amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return 0;
 	if (boot_cpu_data.x86 < 0xf)
 		return 0;
......
@@ -127,7 +127,7 @@ static void __init set_num_var_ranges(void)
 
 	if (use_intel())
 		rdmsr(MSR_MTRRcap, config, dummy);
-	else if (is_cpu(AMD))
+	else if (is_cpu(AMD) || is_cpu(HYGON))
 		config = 2;
 	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 		config = 8;
......
@@ -46,6 +46,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 {
 	/* returns the bit offset of the performance counter register */
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		if (msr >= MSR_F15H_PERF_CTR)
 			return (msr - MSR_F15H_PERF_CTR) >> 1;
@@ -74,6 +75,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 {
 	/* returns the bit offset of the event selection register */
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		if (msr >= MSR_F15H_PERF_CTL)
 			return (msr - MSR_F15H_PERF_CTL) >> 1;
......
@@ -676,6 +676,7 @@ static void __init smp_quirk_init_udelay(void)
 	/* if modern processor, use no delay */
 	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
 	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 		init_udelay = 0;
 		return;
@@ -1592,7 +1593,8 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	int i;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
......
@@ -2711,7 +2711,16 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
 		return true;
 
-	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
+	/* Hygon ("HygonGenuine") */
+	if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
+	    ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
+	    edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
+		return true;
+
+	/*
+	 * default: (not Intel, not AMD, not Hygon), apply Intel's
+	 * stricter rules...
+	 */
 	return false;
 }
......
@@ -93,7 +93,8 @@ static int __init early_root_info_init(void)
 		vendor = id & 0xffff;
 		device = (id>>16) & 0xffff;
 
-		if (vendor != PCI_VENDOR_ID_AMD)
+		if (vendor != PCI_VENDOR_ID_AMD &&
+		    vendor != PCI_VENDOR_ID_HYGON)
 			continue;
 
 		if (hb_probes[i].device == device) {
@@ -390,7 +391,8 @@ static int __init pci_io_ecs_init(void)
 
 static int __init amd_postcore_init(void)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return 0;
 
 	early_root_info_init();
......
@@ -91,6 +91,12 @@ static void xen_pmu_arch_init(void)
 			k7_counters_mirrored = 0;
 			break;
 		}
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		amd_num_counters = F10H_NUM_COUNTERS;
+		amd_counters_base = MSR_K7_PERFCTR0;
+		amd_ctrls_base = MSR_K7_EVNTSEL0;
+		amd_msr_step = 1;
+		k7_counters_mirrored = 0;
 	} else {
 		uint32_t eax, ebx, ecx, edx;
 
@@ -286,7 +292,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
 		if (is_amd_pmu_msr(msr)) {
 			if (!xen_amd_pmu_emulate(msr, val, 1))
 				*val = native_read_msr_safe(msr, err);
@@ -309,7 +315,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
 	uint64_t val = ((uint64_t)high << 32) | low;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
 		if (is_amd_pmu_msr(msr)) {
 			if (!xen_amd_pmu_emulate(msr, &val, 0))
 				*err = native_write_msr_safe(msr, low, high);
@@ -380,7 +386,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
 
 unsigned long long xen_read_pmc(int counter)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return xen_amd_read_pmc(counter);
 	else
 		return xen_intel_read_pmc(counter);
......
@@ -70,6 +70,7 @@ static void power_saving_mwait_init(void)
 
 #if defined(CONFIG_X86)
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_INTEL:
 		/*
......
@@ -205,6 +205,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 static void tsc_check_state(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_INTEL:
 	case X86_VENDOR_CENTAUR:
......
@@ -61,6 +61,7 @@ enum {
 
 #define INTEL_MSR_RANGE		(0xffff)
 #define AMD_MSR_RANGE		(0x7)
+#define HYGON_MSR_RANGE		(0x7)
 
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
@@ -95,6 +96,7 @@ static bool boost_state(unsigned int cpu)
 		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
 		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
@@ -113,6 +115,7 @@ static int boost_set_msr(bool enable)
 		msr_addr = MSR_IA32_MISC_ENABLE;
 		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
 		break;
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		msr_addr = MSR_K7_HWCR;
 		msr_mask = MSR_K7_HWCR_CPB_DIS;
@@ -225,6 +228,8 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		msr &= AMD_MSR_RANGE;
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		msr &= HYGON_MSR_RANGE;
 	else
 		msr &= INTEL_MSR_RANGE;
......
@@ -111,11 +111,16 @@ static int __init amd_freq_sensitivity_init(void)
 {
 	u64 val;
 	struct pci_dev *pcidev;
+	unsigned int pci_vendor;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		pci_vendor = PCI_VENDOR_ID_AMD;
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		pci_vendor = PCI_VENDOR_ID_HYGON;
+	else
 		return -ENODEV;
 
-	pcidev = pci_get_device(PCI_VENDOR_ID_AMD,
+	pcidev = pci_get_device(pci_vendor,
 			PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
 
 	if (!pcidev) {
......
@@ -2565,6 +2565,8 @@
 
 #define PCI_VENDOR_ID_AMAZON		0x1d0f
 
+#define PCI_VENDOR_ID_HYGON		0x1d94
+
 #define PCI_VENDOR_ID_TEKRAM		0x1de1
 #define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
......
@@ -170,6 +170,7 @@ static int get_boost_mode(unsigned int cpu)
 	unsigned long pstates[MAX_HW_PSTATES] = {0,};
 
 	if (cpupower_cpu_info.vendor != X86_VENDOR_AMD &&
+	    cpupower_cpu_info.vendor != X86_VENDOR_HYGON &&
 	    cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
 		return 0;
 
@@ -190,8 +191,9 @@ static int get_boost_mode(unsigned int cpu)
 	printf(_("    Supported: %s\n"), support ? _("yes") : _("no"));
 	printf(_("    Active: %s\n"), active ? _("yes") : _("no"));
 
-	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
-	    cpupower_cpu_info.family >= 0x10) {
+	if ((cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
+	     cpupower_cpu_info.family >= 0x10) ||
+	     cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
 		ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states,
 				     pstates, &pstate_no);
 		if (ret)
......
@@ -45,7 +45,7 @@ static int get_did(int family, union msr_pstate pstate)
 
 	if (family == 0x12)
 		t = pstate.val & 0xf;
-	else if (family == 0x17)
+	else if (family == 0x17 || family == 0x18)
 		t = pstate.fam17h_bits.did;
 	else
 		t = pstate.bits.did;
@@ -59,7 +59,7 @@ static int get_cof(int family, union msr_pstate pstate)
 	int fid, did, cof;
 
 	did = get_did(family, pstate);
-	if (family == 0x17) {
+	if (family == 0x17 || family == 0x18) {
 		fid = pstate.fam17h_bits.fid;
 		cof = 200 * fid / did;
 	} else {
......
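
For family 0x17 and 0x18 parts, the hunk above computes the core operating frequency as 200 MHz * FID / DID, taken straight from the P-state MSR bitfields. A quick worked example with hypothetical field values (fid and did here are made-up illustrations, not values read from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned int fid = 0x78;		/* frequency multiplier, 120 decimal */
	unsigned int did = 0x8;			/* frequency divisor */
	unsigned int cof = 200 * fid / did;	/* same formula as get_cof() above */

	printf("core frequency: %u MHz\n", cof);	/* 120 * 200 / 8 = 3000 MHz */
	return 0;
}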
@@ -8,7 +8,7 @@
 #include "helpers/helpers.h"
 
 static const char *cpu_vendor_table[X86_VENDOR_MAX] = {
-	"Unknown", "GenuineIntel", "AuthenticAMD",
+	"Unknown", "GenuineIntel", "AuthenticAMD", "HygonGenuine",
 };
 
 #if defined(__i386__) || defined(__x86_64__)
@@ -109,6 +109,7 @@ int get_cpu_info(struct cpupower_cpu_info *cpu_info)
 	fclose(fp);
 
 	/* Get some useful CPU capabilities from cpuid */
 	if (cpu_info->vendor != X86_VENDOR_AMD &&
+	    cpu_info->vendor != X86_VENDOR_HYGON &&
 	    cpu_info->vendor != X86_VENDOR_INTEL)
 		return ret;
 
@@ -124,8 +125,9 @@ int get_cpu_info(struct cpupower_cpu_info *cpu_info)
 	if (cpuid_level >= 6 && (cpuid_ecx(6) & 0x1))
 		cpu_info->caps |= CPUPOWER_CAP_APERF;
 
-	/* AMD Boost state enable/disable register */
-	if (cpu_info->vendor == X86_VENDOR_AMD) {
+	/* AMD or Hygon Boost state enable/disable register */
+	if (cpu_info->vendor == X86_VENDOR_AMD ||
+	    cpu_info->vendor == X86_VENDOR_HYGON) {
 		if (ext_cpuid_level >= 0x80000007 &&
 		    (cpuid_edx(0x80000007) & (1 << 9)))
 			cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
......
@@ -61,7 +61,7 @@ extern int be_verbose;
 /* cpuid and cpuinfo helpers  **************************/
 
 enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
-			  X86_VENDOR_AMD, X86_VENDOR_MAX};
+			  X86_VENDOR_AMD, X86_VENDOR_HYGON, X86_VENDOR_MAX};
 
 #define CPUPOWER_CAP_INV_TSC		0x00000001
 #define CPUPOWER_CAP_APERF		0x00000002
......
@@ -26,7 +26,7 @@ int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
 	 * has Hardware determined variable increments instead.
 	 */
 
-	if (cpu_info.family == 0x17) {
+	if (cpu_info.family == 0x17 || cpu_info.family == 0x18) {
 		if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
 			if (!(val & CPUPOWER_AMD_CPBDIS))
 				*active = 1;
......
@@ -241,7 +241,8 @@ static int init_maxfreq_mode(void)
 	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
 		goto use_sysfs;
 
-	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) {
+	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
+	    cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
 		/* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf
 		 * freq.
 		 * A test whether hwcr is accessable/available would be:
......